diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index a588771a581..403f67d6782 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,5 +1,6 @@ Fixes # **Special notes for your reviewer**: @@ -32,7 +38,6 @@ Fixes # If no, just write "NONE". If yes, a release note is required: Enter your extended release note in the block below. If the PR requires additional action from users switching to the new release, include the string "action required". -2. --> ```release-note diff --git a/CHANGELOG-1.10.md b/CHANGELOG-1.10.md index 07ceddde31c..f7bf2a33cd6 100644 --- a/CHANGELOG-1.10.md +++ b/CHANGELOG-1.10.md @@ -1,82 +1,89 @@ -- [v1.10.10](#v11010) - - [Downloads for v1.10.10](#downloads-for-v11010) +- [v1.10.11](#v11011) + - [Downloads for v1.10.11](#downloads-for-v11011) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.10.9](#changelog-since-v1109) + - [Changelog since v1.10.10](#changelog-since-v11010) - [Other notable changes](#other-notable-changes) -- [v1.10.9](#v1109) - - [Downloads for v1.10.9](#downloads-for-v1109) +- [v1.10.10](#v11010) + - [Downloads for v1.10.10](#downloads-for-v11010) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.10.8](#changelog-since-v1108) + - [Changelog since v1.10.9](#changelog-since-v1109) - [Other notable changes](#other-notable-changes-1) -- [v1.10.8](#v1108) - - [Downloads for v1.10.8](#downloads-for-v1108) +- [v1.10.9](#v1109) + - [Downloads for v1.10.9](#downloads-for-v1109) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.10.7](#changelog-since-v1107) + - [Changelog since v1.10.8](#changelog-since-v1108) - [Other notable changes](#other-notable-changes-2) -- [v1.10.7](#v1107) - - [Downloads for 
v1.10.7](#downloads-for-v1107) +- [v1.10.8](#v1108) + - [Downloads for v1.10.8](#downloads-for-v1108) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.10.6](#changelog-since-v1106) - - [Action Required](#action-required) + - [Changelog since v1.10.7](#changelog-since-v1107) - [Other notable changes](#other-notable-changes-3) -- [v1.10.6](#v1106) - - [Downloads for v1.10.6](#downloads-for-v1106) +- [v1.10.7](#v1107) + - [Downloads for v1.10.7](#downloads-for-v1107) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.10.5](#changelog-since-v1105) - - [Action Required](#action-required-1) + - [Changelog since v1.10.6](#changelog-since-v1106) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-4) -- [v1.10.5](#v1105) - - [Downloads for v1.10.5](#downloads-for-v1105) +- [v1.10.6](#v1106) + - [Downloads for v1.10.6](#downloads-for-v1106) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - - [Changelog since v1.10.4](#changelog-since-v1104) - - [Action Required](#action-required-2) + - [Changelog since v1.10.5](#changelog-since-v1105) + - [Action Required](#action-required-1) - [Other notable changes](#other-notable-changes-5) -- [v1.10.4](#v1104) - - [Downloads for v1.10.4](#downloads-for-v1104) +- [v1.10.5](#v1105) + - [Downloads for v1.10.5](#downloads-for-v1105) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - - [Changelog since v1.10.3](#changelog-since-v1103) + - [Changelog since v1.10.4](#changelog-since-v1104) + - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-6) -- [v1.10.3](#v1103) - - [Downloads for v1.10.3](#downloads-for-v1103) +- [v1.10.4](#v1104) + - [Downloads for 
v1.10.4](#downloads-for-v1104) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - - [Changelog since v1.10.2](#changelog-since-v1102) + - [Changelog since v1.10.3](#changelog-since-v1103) - [Other notable changes](#other-notable-changes-7) -- [v1.10.2](#v1102) - - [Downloads for v1.10.2](#downloads-for-v1102) +- [v1.10.3](#v1103) + - [Downloads for v1.10.3](#downloads-for-v1103) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - - [Changelog since v1.10.1](#changelog-since-v1101) + - [Changelog since v1.10.2](#changelog-since-v1102) - [Other notable changes](#other-notable-changes-8) -- [v1.10.1](#v1101) - - [Downloads for v1.10.1](#downloads-for-v1101) +- [v1.10.2](#v1102) + - [Downloads for v1.10.2](#downloads-for-v1102) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - - [Changelog since v1.10.0](#changelog-since-v1100) + - [Changelog since v1.10.1](#changelog-since-v1101) - [Other notable changes](#other-notable-changes-9) -- [v1.10.0](#v1100) - - [Downloads for v1.10.0](#downloads-for-v1100) +- [v1.10.1](#v1101) + - [Downloads for v1.10.1](#downloads-for-v1101) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) + - [Changelog since v1.10.0](#changelog-since-v1100) + - [Other notable changes](#other-notable-changes-10) +- [v1.10.0](#v1100) + - [Downloads for v1.10.0](#downloads-for-v1100) + - [Client Binaries](#client-binaries-11) + - [Server Binaries](#server-binaries-11) + - [Node Binaries](#node-binaries-11) - [Major Themes](#major-themes) - [Node](#node) - [Storage](#storage) @@ -90,7 +97,7 @@ - [Before Upgrading](#before-upgrading) - [Known Issues](#known-issues) - [Deprecations](#deprecations) - - [Other Notable Changes](#other-notable-changes-10) + - [Other Notable 
Changes](#other-notable-changes-11) - [Apps](#apps) - [AWS](#aws) - [Auth](#auth-1) @@ -113,69 +120,125 @@ - [External Dependencies](#external-dependencies) - [v1.10.0-rc.1](#v1100-rc1) - [Downloads for v1.10.0-rc.1](#downloads-for-v1100-rc1) - - [Client Binaries](#client-binaries-11) - - [Server Binaries](#server-binaries-11) - - [Node Binaries](#node-binaries-11) - - [Changelog since v1.10.0-beta.4](#changelog-since-v1100-beta4) - - [Other notable changes](#other-notable-changes-11) -- [v1.10.0-beta.4](#v1100-beta4) - - [Downloads for v1.10.0-beta.4](#downloads-for-v1100-beta4) - [Client Binaries](#client-binaries-12) - [Server Binaries](#server-binaries-12) - [Node Binaries](#node-binaries-12) - - [Changelog since v1.10.0-beta.3](#changelog-since-v1100-beta3) + - [Changelog since v1.10.0-beta.4](#changelog-since-v1100-beta4) - [Other notable changes](#other-notable-changes-12) -- [v1.10.0-beta.3](#v1100-beta3) - - [Downloads for v1.10.0-beta.3](#downloads-for-v1100-beta3) +- [v1.10.0-beta.4](#v1100-beta4) + - [Downloads for v1.10.0-beta.4](#downloads-for-v1100-beta4) - [Client Binaries](#client-binaries-13) - [Server Binaries](#server-binaries-13) - [Node Binaries](#node-binaries-13) - - [Changelog since v1.10.0-beta.2](#changelog-since-v1100-beta2) + - [Changelog since v1.10.0-beta.3](#changelog-since-v1100-beta3) - [Other notable changes](#other-notable-changes-13) -- [v1.10.0-beta.2](#v1100-beta2) - - [Downloads for v1.10.0-beta.2](#downloads-for-v1100-beta2) +- [v1.10.0-beta.3](#v1100-beta3) + - [Downloads for v1.10.0-beta.3](#downloads-for-v1100-beta3) - [Client Binaries](#client-binaries-14) - [Server Binaries](#server-binaries-14) - [Node Binaries](#node-binaries-14) - - [Changelog since v1.10.0-beta.1](#changelog-since-v1100-beta1) - - [Action Required](#action-required-3) + - [Changelog since v1.10.0-beta.2](#changelog-since-v1100-beta2) - [Other notable changes](#other-notable-changes-14) -- [v1.10.0-beta.1](#v1100-beta1) - - [Downloads for 
v1.10.0-beta.1](#downloads-for-v1100-beta1) +- [v1.10.0-beta.2](#v1100-beta2) + - [Downloads for v1.10.0-beta.2](#downloads-for-v1100-beta2) - [Client Binaries](#client-binaries-15) - [Server Binaries](#server-binaries-15) - [Node Binaries](#node-binaries-15) - - [Changelog since v1.10.0-alpha.3](#changelog-since-v1100-alpha3) - - [Action Required](#action-required-4) + - [Changelog since v1.10.0-beta.1](#changelog-since-v1100-beta1) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-15) -- [v1.10.0-alpha.3](#v1100-alpha3) - - [Downloads for v1.10.0-alpha.3](#downloads-for-v1100-alpha3) +- [v1.10.0-beta.1](#v1100-beta1) + - [Downloads for v1.10.0-beta.1](#downloads-for-v1100-beta1) - [Client Binaries](#client-binaries-16) - [Server Binaries](#server-binaries-16) - [Node Binaries](#node-binaries-16) - - [Changelog since v1.10.0-alpha.2](#changelog-since-v1100-alpha2) + - [Changelog since v1.10.0-alpha.3](#changelog-since-v1100-alpha3) + - [Action Required](#action-required-4) - [Other notable changes](#other-notable-changes-16) -- [v1.10.0-alpha.2](#v1100-alpha2) - - [Downloads for v1.10.0-alpha.2](#downloads-for-v1100-alpha2) +- [v1.10.0-alpha.3](#v1100-alpha3) + - [Downloads for v1.10.0-alpha.3](#downloads-for-v1100-alpha3) - [Client Binaries](#client-binaries-17) - [Server Binaries](#server-binaries-17) - [Node Binaries](#node-binaries-17) - - [Changelog since v1.10.0-alpha.1](#changelog-since-v1100-alpha1) - - [Action Required](#action-required-5) + - [Changelog since v1.10.0-alpha.2](#changelog-since-v1100-alpha2) - [Other notable changes](#other-notable-changes-17) -- [v1.10.0-alpha.1](#v1100-alpha1) - - [Downloads for v1.10.0-alpha.1](#downloads-for-v1100-alpha1) +- [v1.10.0-alpha.2](#v1100-alpha2) + - [Downloads for v1.10.0-alpha.2](#downloads-for-v1100-alpha2) - [Client Binaries](#client-binaries-18) - [Server Binaries](#server-binaries-18) - [Node Binaries](#node-binaries-18) + - [Changelog since 
v1.10.0-alpha.1](#changelog-since-v1100-alpha1) + - [Action Required](#action-required-5) + - [Other notable changes](#other-notable-changes-18) +- [v1.10.0-alpha.1](#v1100-alpha1) + - [Downloads for v1.10.0-alpha.1](#downloads-for-v1100-alpha1) + - [Client Binaries](#client-binaries-19) + - [Server Binaries](#server-binaries-19) + - [Node Binaries](#node-binaries-19) - [Changelog since v1.9.0](#changelog-since-v190) - [Action Required](#action-required-6) - - [Other notable changes](#other-notable-changes-18) + - [Other notable changes](#other-notable-changes-19) +# v1.10.11 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.10.11 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes.tar.gz) | `92ba95aea1e28ad0edd4504e6bd3ccd22201e55c4496076d4d864ab15340850041d8c5ee1ff14525b3239fd05a295c6ac3752fa73a7f091e01eea241b5eec8b4` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-src.tar.gz) | `4b929d645c04977b5b5cc6d292203aafca562fde585c71bc71fe09d4b0fe42192d9bbeb943130770bbf841c1079d3d952269f576950dd71c6d0e87f6451cb445` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-client-darwin-386.tar.gz) | `513f6b2c858b926b45cd78d83ec03d179094ea9723165a84456a732c3085a52c2066b2f2750b098227b510b9a7036e19e23c73b751d30d0172a32fae09ea162e` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-client-darwin-amd64.tar.gz) | `c49ad1cc56060d776405bca866183fc67ac1018b89cdc779168b9c41af6d30a999d89e144306685a2f9f1a4daf728af85bc2218bd1e5fbe497daded32c0b3f6b` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-client-linux-386.tar.gz) | `b9f5a240fe84e2c418e97d0548b7e72c3f6ad8320e717f85fcd12caed8c1d545e87f049255ddaa566c25368dc8b4ac80728e7c0e904a70e7dd8e71d728cc3d73` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-client-linux-amd64.tar.gz) 
| `75eb71c9d1bd3b697ac1fbdf247df52fcc8b5e03e8f6182a674623f5020122bf1883b0ed59d85a2434dfdb93e42439d640e456683c0d37948dcd011b84889303` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-client-linux-arm.tar.gz) | `5063acdac8d495a62ad64fa519045be346697510642e14f746b8da00b8b44ec1fd06b96fe97c095e754e6f652823bc44687d17cb80c5fd9cacdf243ad8640551` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-client-linux-arm64.tar.gz) | `e2a3457ef39d7d820f17f95559d1ffbd36d5f041bb9da65892c5727dd67ff274bb5275a482dae069189662e01413d569442a0be7c76f65b7d2537747927566d8` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-client-linux-ppc64le.tar.gz) | `df8b0f9610f50be81770b1a66142390a2d3630e24cc2bd9bb5bf88b4a4eeee636f2dec12655c47b1ec40c3670b2628ae11811bdf94a22605a90b2d4fb1b0d3d3` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-client-linux-s390x.tar.gz) | `d70cde5b029ef2af48d114ca8af3345c3f31ee57f243b598a5d2461c990c466e254b84e52754052ae129a453c6ecb1dbf5bf118bf37f4179a2627b3132e1bcbe` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-client-windows-386.tar.gz) | `4a599bfb3079df2ac50f96d95f0f57275599b34310f96b229d418d51abe963c886c6662e2c9c43886ce2dc47889ff80fcb28e4bddaacf75e25c15c4a8f35f71e` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-client-windows-amd64.tar.gz) | `1feb680268aa249f1a2bd24ba2a5c837125dbec99e4fbe6b3a31169a032c6670b253f45a49f65490fe751c5fe8d0189444273afcaa753812ed722fd088c234c4` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-server-linux-amd64.tar.gz) | `8b6fd92708fc3e296a50b0b11afed5005b285902f9c9cc2a48264d9ee4acd34d358e3885698d46f0c486da626091692b3e4eedbec360f8a432a0baf90f6069fd` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-server-linux-arm.tar.gz) | 
`f78b0cc50b7a7df4cde89f5136b2588f7ae8852c538cf177f3efba2f8b229ee497cc65ff103d7386fcc86fcb7b873446311bc858c3b7a1d1e2e7895e4027044c` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-server-linux-arm64.tar.gz) | `f4d318574a256132a067af0f087215aadd602f3833ea0f66e27e7bcb4e71a125a479b033b67eb7d708faa211240c85f700ec84ecd9e994d818dae97bf197bc82` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-server-linux-ppc64le.tar.gz) | `3207519994c9112eea02c0876a7a93afd723cc5979c195f05d1a87ed45f465195b729221055c9eef3237f3cce2569361437743ea2910a9f0edb34dcae3ebef2b` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-server-linux-s390x.tar.gz) | `ce1b2061a07147025dd244d38c65375b8707c9f58ce9ef4c205f9c56bab77afce04fd525163c61c7eb144cf8348f3e334ebc13652b0494c54bfe984b96faec17` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-node-linux-amd64.tar.gz) | `1609818d925e88c28ec01938c81785301ee79cc93a771cc22ecccbfbf45c5b7c7d47bc3c660f0343978e5269839a08cb3d56284916d5b4c5bf5d816aebf0f19b` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-node-linux-arm.tar.gz) | `1829fe50dd8cd02dff5fad304a01cff6abb48aa376a0c6c2ce61264fa48ccac334491a79492ef1786b9f0cd61573c5983f4ab7706b5f7c5ae87b6d4509616100` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-node-linux-arm64.tar.gz) | `d7edc4b2a89cf0483780f35085b49b6ebabb30c38bbb2845e55b13b42decca7cc3ea79fe95f32b15931f3d38574c865f5d26f4edcfc0ec28c88b4f28953ee651` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-node-linux-ppc64le.tar.gz) | `92ac92c5a841e8c788df91c09ee6ed00d580283b8d287cd2d5ca238971506ef1fff22d2e43820f4fa7ff19475d4337000b7348827795386a23cf876b71d4efe5` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-node-linux-s390x.tar.gz) | 
`56c5fd8a3a721f81813c145f70e5580dd1757af3e0afcf6a9bb793a7442ab71996e640d3d00e81b54dd5a3b1b70456b2e3747b70070f5c5a4afd0b0ba1b51e1d` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.10.11/kubernetes-node-windows-amd64.tar.gz) | `9c7dc2aaab8258a86bc93ae7ec142131de77cf032e0dd548e3846897d9dd2905bbb45f5a279a4ccbdc406a2f2f53427ce1877ee65dc9eb72bc133488a7eec5d2` + +## Changelog since v1.10.10 + +### Other notable changes + +* CVE-2018-1002105: Fix critical security issue in kube-apiserver upgrade request proxy handler ([#71411](https://github.com/kubernetes/kubernetes/issues/71411), [@liggitt](https://github.com/liggitt)) + + + # v1.10.10 [Documentation](https://docs.k8s.io) @@ -1020,17 +1083,17 @@ filename | sha256 hash ### Node -Many of the changes within SIG-Node revolve around control. With the beta release of the `kubelet.config.k8s.io` API group, a significant subset of Kubelet configuration can now be [configured via a versioned config file](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/). Kubernetes v1.10 adds alpha support for the ability to [configure whether containers in a pod should share a single process namespace](https://github.com/kubernetes/features/issues/495), and the CRI has been upgraded to v1alpha2, which adds [support for Windows Container Configuration](https://github.com/kubernetes/features/issues/547). Kubernetes v1.10 also ships with the beta release of the [CRI validation test suite](https://github.com/kubernetes/features/issues/292). +Many of the changes within SIG-Node revolve around control. With the beta release of the `kubelet.config.k8s.io` API group, a significant subset of Kubelet configuration can now be [configured via a versioned config file](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/). 
Kubernetes v1.10 adds alpha support for the ability to [configure whether containers in a pod should share a single process namespace](https://github.com/kubernetes/enhancements/issues/495), and the CRI has been upgraded to v1alpha2, which adds [support for Windows Container Configuration](https://github.com/kubernetes/enhancements/issues/547). Kubernetes v1.10 also ships with the beta release of the [CRI validation test suite](https://github.com/kubernetes/enhancements/issues/292). The Resource Management Working Group graduated three features to beta in the 1.10 release. First, [CPU Manager](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/), which allows users to request exclusive CPU cores. This helps performance in a variety of use-cases, including network latency sensitive applications, as well as applications that benefit from CPU cache residency. Next, [Huge Pages](https://kubernetes.io/docs/tasks/manage-hugepages/scheduling-hugepages/), which allows pods to consume either 2Mi or 1Gi Huge Pages. This benefits applications that consume large amounts of memory. Use of Huge Pages is a common tuning recommendation for databases and JVMs. Finally, the [Device Plugin](https://kubernetes.io/docs/concepts/cluster-administration/device-plugins/) feature, which provides a framework for vendors to advertise their resources to the Kubelet without changing Kubernetes core code. Targeted devices include GPUs, High-performance NICs, FPGAs, InfiniBand, and other similar computing resources that may require vendor specific initialization and setup. ### Storage -This release brings additional power to both local storage and Persistent Volumes. [Mount namespace propagation](https://github.com/kubernetes/features/issues/432) allows a container to mount a volume as rslave so that host mounts can be seen inside the container, or as rshared so that mounts made inside a container can be seen by the host. 
(Note that this is [not supported on Windows](https://github.com/kubernetes/kubernetes/pull/60275).) [Local Ephemeral Storage Capacity Isolation](https://github.com/kubernetes/features/issues/361) makes it possible to set requests and limits on ephemeral local storage resources. In addition, you can now create [Local Persistent Storage](https://github.com/kubernetes/features/issues/121), which enables PersistentVolumes to be created with locally attached disks, and not just network volumes. +This release brings additional power to both local storage and Persistent Volumes. [Mount namespace propagation](https://github.com/kubernetes/enhancements/issues/432) allows a container to mount a volume as rslave so that host mounts can be seen inside the container, or as rshared so that mounts made inside a container can be seen by the host. (Note that this is [not supported on Windows](https://github.com/kubernetes/kubernetes/pull/60275).) [Local Ephemeral Storage Capacity Isolation](https://github.com/kubernetes/enhancements/issues/361) makes it possible to set requests and limits on ephemeral local storage resources. In addition, you can now create [Local Persistent Storage](https://github.com/kubernetes/enhancements/issues/121), which enables PersistentVolumes to be created with locally attached disks, and not just network volumes. -On the Persistent Volumes side, this release [Prevents deletion of Persistent Volume Claims that are used by a pod](https://github.com/kubernetes/features/issues/498) and [Persistent Volumes that are bound to a Persistent Volume Claim](https://github.com/kubernetes/features/issues/499), making it impossible to delete storage that is in use by a pod. 
+On the Persistent Volumes side, this release [Prevents deletion of Persistent Volume Claims that are used by a pod](https://github.com/kubernetes/enhancements/issues/498) and [Persistent Volumes that are bound to a Persistent Volume Claim](https://github.com/kubernetes/enhancements/issues/499), making it impossible to delete storage that is in use by a pod. -This release also includes [Topology Aware Volume Scheduling](https://github.com/kubernetes/features/issues/490) for local persistent volumes, the stable release of [Detailed storage metrics of internal state](https://github.com/kubernetes/features/issues/496), and beta support for [Out-of-tree CSI Volume Plugins](https://github.com/kubernetes/features/issues/178). +This release also includes [Topology Aware Volume Scheduling](https://github.com/kubernetes/enhancements/issues/490) for local persistent volumes, the stable release of [Detailed storage metrics of internal state](https://github.com/kubernetes/enhancements/issues/496), and beta support for [Out-of-tree CSI Volume Plugins](https://github.com/kubernetes/enhancements/issues/178). ### Windows @@ -1042,23 +1105,23 @@ SIG-OpenStack updated the OpenStack provider to use newer APIs, consolidated com ### API-machinery -[API Aggregation](https://github.com/kubernetes/features/issues/263) has been upgraded to "stable" in Kubernetes 1.10, so you can use it in production. Webhooks have seen numerous improvements, including alpha [Support for self-hosting authorizer webhooks](https://github.com/kubernetes/features/issues/516). +[API Aggregation](https://github.com/kubernetes/enhancements/issues/263) has been upgraded to "stable" in Kubernetes 1.10, so you can use it in production. Webhooks have seen numerous improvements, including alpha [Support for self-hosting authorizer webhooks](https://github.com/kubernetes/enhancements/issues/516). 
### Auth -This release lays the groundwork for new authentication methods, including the alpha release of [External client-go credential providers](https://github.com/kubernetes/features/issues/541) and the [TokenRequest API](https://github.com/kubernetes/features/issues/542). In addition, [Pod Security Policy](https://github.com/kubernetes/features/issues/5) now lets administrators decide what contexts pods can run in, and gives administrators the ability to [limit node access to the API](https://github.com/kubernetes/features/issues/279). +This release lays the groundwork for new authentication methods, including the alpha release of [External client-go credential providers](https://github.com/kubernetes/enhancements/issues/541) and the [TokenRequest API](https://github.com/kubernetes/enhancements/issues/542). In addition, [Pod Security Policy](https://github.com/kubernetes/enhancements/issues/5) now lets administrators decide what contexts pods can run in, and gives administrators the ability to [limit node access to the API](https://github.com/kubernetes/enhancements/issues/279). ### Azure -Kubernetes 1.10 includes alpha [Azure support for cluster-autoscaler](https://github.com/kubernetes/features/issues/514), as well as [support for Azure Virtual Machine Scale Sets](https://github.com/kubernetes/features/issues/513). +Kubernetes 1.10 includes alpha [Azure support for cluster-autoscaler](https://github.com/kubernetes/enhancements/issues/514), as well as [support for Azure Virtual Machine Scale Sets](https://github.com/kubernetes/enhancements/issues/513). ### CLI -This release includes a change to [kubectl get and describe to work better with extensions](https://github.com/kubernetes/features/issues/515), as the server, rather than the client, returns this information for a smoother user experience. 
+This release includes a change to [kubectl get and describe to work better with extensions](https://github.com/kubernetes/enhancements/issues/515), as the server, rather than the client, returns this information for a smoother user experience. ### Network -In terms of networking, Kubernetes 1.10 is about control. Users now have beta support for the ability to [configure a pod's resolv.conf](https://github.com/kubernetes/features/issues/504), rather than relying on the cluster DNS, as well as [configuring the NodePort IP address](https://github.com/kubernetes/features/issues/539). You can also [switch the default DNS plugin to CoreDNS](https://github.com/kubernetes/features/issues/427) (beta). +In terms of networking, Kubernetes 1.10 is about control. Users now have beta support for the ability to [configure a pod's resolv.conf](https://github.com/kubernetes/enhancements/issues/504), rather than relying on the cluster DNS, as well as [configuring the NodePort IP address](https://github.com/kubernetes/enhancements/issues/539). You can also [switch the default DNS plugin to CoreDNS](https://github.com/kubernetes/enhancements/issues/427) (beta). ## Before Upgrading @@ -1204,7 +1267,7 @@ If no featureGates was specified in `kubeadm-config`, just change `featureGates: * VolumeScheduling and LocalPersistentVolume features are beta and enabled by default. The PersistentVolume NodeAffinity alpha annotation is deprecated and will be removed in a future release. ([#59391](https://github.com/kubernetes/kubernetes/pull/59391), [@msau42](https://github.com/msau42)) -* The alpha Accelerators feature gate is deprecated and will be removed in v1.11. Please use device plugins ([https://github.com/kubernetes/features/issues/368](https://github.com/kubernetes/features/issues/368)) instead. They can be enabled using the DevicePlugins feature gate. 
([#57384](https://github.com/kubernetes/kubernetes/pull/57384), [@mindprince](https://github.com/mindprince)) +* The alpha Accelerators feature gate is deprecated and will be removed in v1.11. Please use device plugins ([https://github.com/kubernetes/enhancements/issues/368](https://github.com/kubernetes/enhancements/issues/368)) instead. They can be enabled using the DevicePlugins feature gate. ([#57384](https://github.com/kubernetes/kubernetes/pull/57384), [@mindprince](https://github.com/mindprince)) * The ability to use kubectl scale jobs is deprecated. All other scale operations remain in place, but the ability to scale jobs will be removed in a future release. ([#60139](https://github.com/kubernetes/kubernetes/pull/60139), [@soltysh](https://github.com/soltysh)) diff --git a/CHANGELOG-1.11.md b/CHANGELOG-1.11.md index 399bb245664..eaafc5dfd47 100644 --- a/CHANGELOG-1.11.md +++ b/CHANGELOG-1.11.md @@ -1,40 +1,54 @@ -- [v1.11.4](#v1114) - - [Downloads for v1.11.4](#downloads-for-v1114) +- [v1.11.6](#v1116) + - [Downloads for v1.11.6](#downloads-for-v1116) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.11.3](#changelog-since-v1113) + - [Changelog since v1.11.5](#changelog-since-v1115) - [Other notable changes](#other-notable-changes) -- [v1.11.3](#v1113) - - [Downloads for v1.11.3](#downloads-for-v1113) +- [v1.11.5](#v1115) + - [Downloads for v1.11.5](#downloads-for-v1115) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.11.2](#changelog-since-v1112) - - [Action Required](#action-required) + - [Changelog since v1.11.4](#changelog-since-v1114) - [Other notable changes](#other-notable-changes-1) -- [v1.11.2](#v1112) - - [Downloads for v1.11.2](#downloads-for-v1112) +- [v1.11.4](#v1114) + - [Downloads for v1.11.4](#downloads-for-v1114) - [Client Binaries](#client-binaries-2) - [Server 
Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.11.1](#changelog-since-v1111) - - [Action Required](#action-required-1) + - [Changelog since v1.11.3](#changelog-since-v1113) - [Other notable changes](#other-notable-changes-2) -- [v1.11.1](#v1111) - - [Downloads for v1.11.1](#downloads-for-v1111) +- [v1.11.3](#v1113) + - [Downloads for v1.11.3](#downloads-for-v1113) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.11.0](#changelog-since-v1110) - - [Action Required](#action-required-2) + - [Changelog since v1.11.2](#changelog-since-v1112) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-3) -- [v1.11.0](#v1110) - - [Downloads for v1.11.0](#downloads-for-v1110) +- [v1.11.2](#v1112) + - [Downloads for v1.11.2](#downloads-for-v1112) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) + - [Changelog since v1.11.1](#changelog-since-v1111) + - [Action Required](#action-required-1) + - [Other notable changes](#other-notable-changes-4) +- [v1.11.1](#v1111) + - [Downloads for v1.11.1](#downloads-for-v1111) + - [Client Binaries](#client-binaries-5) + - [Server Binaries](#server-binaries-5) + - [Node Binaries](#node-binaries-5) + - [Changelog since v1.11.0](#changelog-since-v1110) + - [Action Required](#action-required-2) + - [Other notable changes](#other-notable-changes-5) +- [v1.11.0](#v1110) + - [Downloads for v1.11.0](#downloads-for-v1110) + - [Client Binaries](#client-binaries-6) + - [Server Binaries](#server-binaries-6) + - [Node Binaries](#node-binaries-6) - [Kubernetes 1.11 Release Notes](#kubernetes-111-release-notes) - [Urgent Upgrade Notes](#urgent-upgrade-notes) - [(No, really, you MUST do this before you upgrade)](#no-really-you-must-do-this-before-you-upgrade) @@ -57,7 +71,7 @@ - [Graduated to Stable/GA](#graduated-to-stablega) - 
[Graduated to Beta](#graduated-to-beta) - [New alpha features](#new-alpha-features) - - [Other Notable Changes](#other-notable-changes-4) + - [Other Notable Changes](#other-notable-changes-6) - [SIG API Machinery](#sig-api-machinery-1) - [SIG Apps](#sig-apps) - [SIG Auth](#sig-auth-1) @@ -81,62 +95,206 @@ - [Non-user-facing changes](#non-user-facing-changes) - [v1.11.0-rc.3](#v1110-rc3) - [Downloads for v1.11.0-rc.3](#downloads-for-v1110-rc3) - - [Client Binaries](#client-binaries-5) - - [Server Binaries](#server-binaries-5) - - [Node Binaries](#node-binaries-5) - - [Changelog since v1.11.0-rc.2](#changelog-since-v1110-rc2) - - [Other notable changes](#other-notable-changes-5) -- [v1.11.0-rc.2](#v1110-rc2) - - [Downloads for v1.11.0-rc.2](#downloads-for-v1110-rc2) - - [Client Binaries](#client-binaries-6) - - [Server Binaries](#server-binaries-6) - - [Node Binaries](#node-binaries-6) - - [Changelog since v1.11.0-rc.1](#changelog-since-v1110-rc1) - - [Other notable changes](#other-notable-changes-6) -- [v1.11.0-rc.1](#v1110-rc1) - - [Downloads for v1.11.0-rc.1](#downloads-for-v1110-rc1) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - - [Changelog since v1.11.0-beta.2](#changelog-since-v1110-beta2) - - [Action Required](#action-required-3) + - [Changelog since v1.11.0-rc.2](#changelog-since-v1110-rc2) - [Other notable changes](#other-notable-changes-7) -- [v1.11.0-beta.2](#v1110-beta2) - - [Downloads for v1.11.0-beta.2](#downloads-for-v1110-beta2) +- [v1.11.0-rc.2](#v1110-rc2) + - [Downloads for v1.11.0-rc.2](#downloads-for-v1110-rc2) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - - [Changelog since v1.11.0-beta.1](#changelog-since-v1110-beta1) - - [Action Required](#action-required-4) + - [Changelog since v1.11.0-rc.1](#changelog-since-v1110-rc1) - [Other notable changes](#other-notable-changes-8) -- 
[v1.11.0-beta.1](#v1110-beta1) - - [Downloads for v1.11.0-beta.1](#downloads-for-v1110-beta1) +- [v1.11.0-rc.1](#v1110-rc1) + - [Downloads for v1.11.0-rc.1](#downloads-for-v1110-rc1) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - - [Changelog since v1.11.0-alpha.2](#changelog-since-v1110-alpha2) - - [Action Required](#action-required-5) + - [Changelog since v1.11.0-beta.2](#changelog-since-v1110-beta2) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-9) -- [v1.11.0-alpha.2](#v1110-alpha2) - - [Downloads for v1.11.0-alpha.2](#downloads-for-v1110-alpha2) +- [v1.11.0-beta.2](#v1110-beta2) + - [Downloads for v1.11.0-beta.2](#downloads-for-v1110-beta2) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) - - [Changelog since v1.11.0-alpha.1](#changelog-since-v1110-alpha1) + - [Changelog since v1.11.0-beta.1](#changelog-since-v1110-beta1) + - [Action Required](#action-required-4) - [Other notable changes](#other-notable-changes-10) -- [v1.11.0-alpha.1](#v1110-alpha1) - - [Downloads for v1.11.0-alpha.1](#downloads-for-v1110-alpha1) +- [v1.11.0-beta.1](#v1110-beta1) + - [Downloads for v1.11.0-beta.1](#downloads-for-v1110-beta1) - [Client Binaries](#client-binaries-11) - [Server Binaries](#server-binaries-11) - [Node Binaries](#node-binaries-11) + - [Changelog since v1.11.0-alpha.2](#changelog-since-v1110-alpha2) + - [Action Required](#action-required-5) + - [Other notable changes](#other-notable-changes-11) +- [v1.11.0-alpha.2](#v1110-alpha2) + - [Downloads for v1.11.0-alpha.2](#downloads-for-v1110-alpha2) + - [Client Binaries](#client-binaries-12) + - [Server Binaries](#server-binaries-12) + - [Node Binaries](#node-binaries-12) + - [Changelog since v1.11.0-alpha.1](#changelog-since-v1110-alpha1) + - [Other notable changes](#other-notable-changes-12) +- [v1.11.0-alpha.1](#v1110-alpha1) + - 
[Downloads for v1.11.0-alpha.1](#downloads-for-v1110-alpha1) + - [Client Binaries](#client-binaries-13) + - [Server Binaries](#server-binaries-13) + - [Node Binaries](#node-binaries-13) - [Changelog since v1.10.0](#changelog-since-v1100) - [Action Required](#action-required-6) - - [Other notable changes](#other-notable-changes-11) + - [Other notable changes](#other-notable-changes-13) +# v1.11.6 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.11.6 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes.tar.gz) | `78e865de9aa6065a48027fc2c9c5e9e957c36dd69d1859e29db283875562cf11d54cb50910151e320f08554a5a530f9af83e8c437bd78e8feef038514be303a9` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-src.tar.gz) | `e9309ebcc5ca68c86ff623c45af0a91c4d9b11d4a39df6f6df98101aea22422ada4e4be243eabf8e7134fe38ff899da9f504c7abed6d482d0222ada0e0b65d81` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-client-darwin-386.tar.gz) | `b5b3bcf9aab8694647bb03d13c83fdc5250d29f52cdf3ff9e52147f8be5fad9713242ad6996a84bcb6aaa76739abc392a8774b14c12d6896923ec0671eda84d9` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-client-darwin-amd64.tar.gz) | `dc25f6121501d79c3871848dc91a4528e24031ef7c7c52f85e1706e5d83b41ae1aabb33824ddbb456283f204d61853cf2daed6dd433dbb2ed16f67f7b32c8e52` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-client-linux-386.tar.gz) | `8e8e22e03126c8de297ca45b7e1b292ac1c0fd1e5cc119964de2332457c63df16a2aa43fd3dcef6cff4224324dddcc497932533c3fed5c55a0d097bb364dc8e9` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-client-linux-amd64.tar.gz) | `6048e7ccb5c084f7582511cba3bd0db2c8ccb237d409956d08cb3c20735e076b52bc3a0aa3d7cce8d1d8d980e5e3bb72ebe25107f17c1ec86a1841cfadd7dbc7` 
+[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-client-linux-arm.tar.gz) | `3fcc09d617949c09fc0a470633460b534092f45e47b74365bbed243f582817f0a63a73ff39c90e298894334085ec3c32d8a75b1c742ac782beff4f5ed36fb070` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-client-linux-arm64.tar.gz) | `5fc0c5d1f1580080ac383cdef61ce11d21845a84e2f196ccbc826ec565ad34755ec746be741ed46de93d782d1e5f1ea9991a8583cbe6f34222d56ff4f7aef67a` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-client-linux-ppc64le.tar.gz) | `95f986284501b681ba45158383248dacdb42b95445776509844a34e7939116c61db3c7b37fb0b390a8ff5d0a5354ec5e9e98048117f2e716c28e87bcef9e19d7` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-client-linux-s390x.tar.gz) | `a83f75d6698b8586c7974d2fb67b7f25aaf591ee6b17381b0d415a170941969c65e97df679b4efc6df7ea4f1424339ceb210b5e64c12bee923988cd5a24761e7` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-client-windows-386.tar.gz) | `f083073409ef17c29a4791163d6ca0cbb19d61dbd79f8057c2e7056951638d6600b7047b7638394d1f1847179897c30e3c5666665d937fca50f2221fbfa43cef` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-client-windows-amd64.tar.gz) | `3a8198a787c2efc74e40e7eaa2c66ff76b3ae13656ba49382de0fbda6ff21bbe17b5b8cf7672b2e538e017a9773ad172df3cbdb4e1c800b535cb84e5f66078ea` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-server-linux-amd64.tar.gz) | `4aa44b92ecaed54ff3535687729d8a3e4421a9c3772937b7a26bb813d1cce06785ad4a746c40654bf3b3f57b1ece1691a244220693ed0db402b2430bd94119af` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-server-linux-arm.tar.gz) | `56f2b429e48858bbac7a28a46c4a59b942fbcad14da8c89d997f87b84268a7f5e8e7b591f326b3e51cdac2ec86ed4bb20eb1de62fdc595f1cea6acbc9117c8e1` 
+[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-server-linux-arm64.tar.gz) | `afd2aa213c6485591faf734d58415aef44f887bd4da4a895f37f091665c372ebeecc8c3daeafc47864ae376b457a6d134d535d021f5601a7e8dddbbe2c8f7ac1` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-server-linux-ppc64le.tar.gz) | `0ecf8feafb05746a43fc8e9cd822dbee294f9e02323851b3b9cdf27eba0f40dcbf5a047a54f09f248496191688fe1f8422dc9a02973644bf8853427e589b2707` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-server-linux-s390x.tar.gz) | `88a450b30aa213b30092447761eadd6c8440a406a10819400f365edde42f3ddfb39dd330981fd5d5f6a1d5c15a623ef9e89e97f8571a5762a8b7e5b5b4db9a82` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-node-linux-amd64.tar.gz) | `5627a6382277e34ad5240b41740b5bb300dd86cbceb9fe8967da07650939b1a5e899dbb2da63bba6202e2ca95dbdd9492b8ce9285a5e57ddbf3b6b52f8f8a8f8` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-node-linux-arm.tar.gz) | `d6d497bd02eb7b749663f600b11ce5364d79e1a2e7229c9ed9abaf25d99de20f41caa88b1358940986faacdfe4c110afe3f8e63f102fb26b11166611cbe43b89` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-node-linux-arm64.tar.gz) | `bd23aced861869fb857bf36754882274f3c7353eccdbc9fc7f1b3e24ab80254efb0de6d79783e2438a1eda12d51b807786507c000d5752001b605efac0607a6d` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-node-linux-ppc64le.tar.gz) | `fb78c12993b800b235fe31a0d21172aeb207b2d2d78bb84a6e4cb1b8536bf3e55b2c139d4f17c3967ebbcb8c296dca5b931b7dcff28a04fa49e71a2e9604be05` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-node-linux-s390x.tar.gz) | `15945c55c3665d9694b0a6476e53af5a372742ef59c49a8678141a545db0491f52cd1cc2bc9a9d1a9b3f8f6ad51228651e59bc4a9703e8f8c90d05409e3a92a0` 
+[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.11.6/kubernetes-node-windows-amd64.tar.gz) | `3bf95932081fe7f62741f0ad652d0bef94aa61961a6af4b097b0a92a80deffa2c24b3a8e4a14a881c4474b84af46b6212b1ea5377cba125c7c9eb242adf2cc92` + +## Changelog since v1.11.5 + +### Other notable changes + +* Scheduler only activates unschedulable pods if node's scheduling related properties change. ([#71551](https://github.com/kubernetes/kubernetes/pull/71551), [@mlmhl](https://github.com/mlmhl)) +* Disable proxy to loopback and linklocal ([#71980](https://github.com/kubernetes/kubernetes/pull/71980), [@micahhausler](https://github.com/micahhausler)) +* Scheduler only activates unschedulable pods if node's scheduling related properties change. ([#71551](https://github.com/kubernetes/kubernetes/pull/71551), [@mlmhl](https://github.com/mlmhl)) +* fix issue: vm sku restriction policy does not work in azure disk attach/detach ([#71941](https://github.com/kubernetes/kubernetes/pull/71941), [@andyzhangx](https://github.com/andyzhangx)) +* Include CRD for BGPConfigurations, needed for calico 2.x to 3.x upgrade. ([#71868](https://github.com/kubernetes/kubernetes/pull/71868), [@satyasm](https://github.com/satyasm)) +* On GCI, NPD starts to monitor kubelet, docker, containerd crashlooping, read-only filesystem and corrupt docker overlay2 issues. ([#71522](https://github.com/kubernetes/kubernetes/pull/71522), [@wangzhen127](https://github.com/wangzhen127)) +* UDP connections now support graceful termination in IPVS mode ([#71515](https://github.com/kubernetes/kubernetes/pull/71515), [@lbernail](https://github.com/lbernail)) +* Fix a potential bug that scheduler preempts unnecessary pods. ([#70898](https://github.com/kubernetes/kubernetes/pull/70898), [@Huang-Wei](https://github.com/Huang-Wei)) +* Only use the first IP address got from instance metadata. This is because Azure CNI would set up a list of IP addresses in instance metadata, while only the first one is the Node's IP. 
([#71736](https://github.com/kubernetes/kubernetes/pull/71736), [@feiskyer](https://github.com/feiskyer)) +* Fix scheduling starvation of pods in cluster with large number of unschedulable pods. ([#71488](https://github.com/kubernetes/kubernetes/pull/71488), [@bsalamat](https://github.com/bsalamat)) +* Fixes an issue with stuck connections handling error responses ([#71419](https://github.com/kubernetes/kubernetes/pull/71419), [@liggitt](https://github.com/liggitt)) +* Ensure orphan public IPs on Azure deleted when service recreated with the same name. ([#70463](https://github.com/kubernetes/kubernetes/pull/70463), [@feiskyer](https://github.com/feiskyer)) +* Upgrade Stackdriver Logging Agent addon image to 0.6-1.6.0-1 to use Fluentd v1.2. This provides nanoseconds timestamp granularity for logs. ([#70954](https://github.com/kubernetes/kubernetes/pull/70954), [@qingling128](https://github.com/qingling128)) +* [GCE] Filter out spammy audit logs from cluster autoscaler. ([#70696](https://github.com/kubernetes/kubernetes/pull/70696), [@loburm](https://github.com/loburm)) +* Fix a scheduler panic due to internal cache inconsistency ([#71063](https://github.com/kubernetes/kubernetes/pull/71063), [@Huang-Wei](https://github.com/Huang-Wei)) +* apiserver: fixes handling and logging of panics in REST handlers to prevent crashes ([#71076](https://github.com/kubernetes/kubernetes/pull/71076), [@liggitt](https://github.com/liggitt)) +* Upgrade golang.org/x/net image to release-branch.go1.10 ([#70663](https://github.com/kubernetes/kubernetes/pull/70663), [@wenjiaswe](https://github.com/wenjiaswe)) +* Fixes a bug in previous releases where a pod could be placed inside another pod's cgroup when specifying --cgroup-root ([#70678](https://github.com/kubernetes/kubernetes/pull/70678), [@dashpole](https://github.com/dashpole)) +* Fixes ability for admin/edit/view users to see controller revisions, needed for kubectl rollout commands 
([#70699](https://github.com/kubernetes/kubernetes/pull/70699), [@liggitt](https://github.com/liggitt)) +* remove retry operation on attach/detach azure disk ([#70568](https://github.com/kubernetes/kubernetes/pull/70568), [@andyzhangx](https://github.com/andyzhangx)) +* Fix cloud-controller-manager crash when using OpenStack provider and PersistentVolume initializing controller ([#70459](https://github.com/kubernetes/kubernetes/pull/70459), [@mvladev](https://github.com/mvladev)) +* Update defaultbackend to 1.5: move /metrics to a private endpoint (:10254). ([#69383](https://github.com/kubernetes/kubernetes/pull/69383), [@bowei](https://github.com/bowei)) +* Allows changing nodeName in endpoint update. ([#68575](https://github.com/kubernetes/kubernetes/pull/68575), [@prameshj](https://github.com/prameshj)) +* Improve Azure instance metadata handling by adding caches. ([#70353](https://github.com/kubernetes/kubernetes/pull/70353), [@feiskyer](https://github.com/feiskyer)) + + + +# v1.11.5 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.11.5 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes.tar.gz) | `42d12fec828546af526b1dd9e5a54fd72cebc458b4e0e3ad9ac50d592ca6be3971553cd9415f9e883f49556c2214f62b03d15646781ecba2a136ac036c48ebe2` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-src.tar.gz) | `c7df3c980bae5bdc9e865846a32b3e8a44650cf1f860eb2a5e444e7b4e07d3702c09a47cc98857527c2acd6a44817149b07eedc87574cccd46d29d621f9567e1` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-client-darwin-386.tar.gz) | `8044fd8afaa1113f6b11f9a24d6236faec281f245f9fcd773e6a70af7388cf7962ac346bc078c587c629c9408edf0348e04ef1d10eb66eeb5e6cfdaa6c6a09fd` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-client-darwin-amd64.tar.gz) | 
`40fdf051aaa3455f3ca289e8b5c6e8835f53f6e034aa622e91a6264b5eca91fb314d49cc2d4d2f48c1469240207f049b4f37f4c155d8307f161f758da51377d9` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-client-linux-386.tar.gz) | `d947642bd65b08315b998d32afe3b3c03dbc9bda5d0a81fb1364127eae72ba84e6682c7b0d3be6a55ab8c0fa1a4731253438d08de64c6c007fee3c75f5a198fc` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-client-linux-amd64.tar.gz) | `7028d357f65603398c35b7578793a153248e17c2ad631541a587f4ae13ef93f058db130390eea4820c2fd7707509ed0eb581cb129790b12680e869829a6fc241` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-client-linux-arm.tar.gz) | `141a65303ed15b53f8bbf19031d19e257a8a02bf735b95099f8f1904c51499a33c18f416c40c15fae50ee8754b269e7ea15df53825b8e62d8c653344be70a2fe` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-client-linux-arm64.tar.gz) | `35d8e64f641b140e954fc2c79232b73cddec5863a70dc72b700a9eb0f7b245e821b6f6a7ae9465c3b0ce7bcf5ed4432585f2c4b5e3b3984b425a629d190893e1` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-client-linux-ppc64le.tar.gz) | `3aa8cc0ef637fd834d5495cf1cd30a4cb2285ee473fd0f9abea71eee559661a45cd6f6e4791a2ab27e2b4e060c642b1c228806aed1bc100ab5c79f052a8cef9c` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-client-linux-s390x.tar.gz) | `834367f2f24df60b3786fb526321a7c8efc36f187de6677c8e3f3e90afe0b87b9eff1d6cc4d7e3bf80d65243200bf0e9099808f4d0f5214e98aaabba8c99e012` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-client-windows-386.tar.gz) | `ff049e98859eb7d1126a2108a51c33f8643a17bc0f5c46990312ac1cbdb20a9bfb9f3aa13e68b55a462c2f7d97eef0d6a2419d6c1539f9475a3fbce8b2cda3d2` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-client-windows-amd64.tar.gz) | 
`f813af0f54917dd73b943367ea62f9c412f09a3fa74d8b6fc834f2e2496d4dc323a1670efe41496afd306563b5f9c230dba35323c9c88cbf2743a0c12d28cbf6` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-server-linux-amd64.tar.gz) | `52ba9a169932e509db13b0ef839c82cc7834b1e8444ab7e59bd99ec95c31996e0dc3fd0c8fcc9c74e116cb74abfaeed8f6c666c62bf305289ce0aca4e3c4cbf0` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-server-linux-arm.tar.gz) | `7751c9d6f13944748d4ee514afa53a8c6e897a68edfa0441198d31860fb8b131343e05b9e4da2a234b088429e27e147908e3adcf4ab9a89f6eb3d19dda385465` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-server-linux-arm64.tar.gz) | `37b29b0742e1d2c83d2003282f9b0e9fec066dda2b463cc365e66ef69ecaec80404ca0a340fda3c62e324f088f8041abbec35a57ad20d2020f4715a59aa18ce8` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-server-linux-ppc64le.tar.gz) | `383a2023249542c75d8ab19e53ae9b27369a55ec81551103ae0e126a761f3e4dd76cbc904961f7ae34e03d3466df8edf66da479cacdded2c7463fdeee9e4fc08` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-server-linux-s390x.tar.gz) | `aa63b5eeffec85c2e3ba58eb6c92bdb24d65fe6f987be5d0db2f24a4538a2d3fa4876d6e6f7456cd8ec88c93ab63a0d45385fc1b919cd70f34741a70a8dbd0aa` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-node-linux-amd64.tar.gz) | `fcaea8d5a89ee29e52f59157d746f469078471b316b71e28197986ffe07b419d4459dc889faa7897fe34b86d97dd77e7673ba1a348730d68012e703c5e6e6114` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-node-linux-arm.tar.gz) | `b6fe554a7a2d2c2123e9362abaa86f0d4434139d2c1f60c7a47d5ba00afd68e2a2f7d8b29a6ac724d7700606fbd692f91b57c1d6e07828a1bfa82e698fcb2908` 
+[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-node-linux-arm64.tar.gz) | `5d88c504cae76098783d00d17c684c505774f32941d0be476c68e293ea9baf29248ba4fabe13577b04be14f49fcdf0eb5624a1267e866aa3fe0035144acb88a4` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-node-linux-ppc64le.tar.gz) | `2238cb556ea3e8d5fe9b58b7c460331c0c2f1390e894d1cf7dfe5f0d9ce779ca181119283955dff2b8412742b12dee2a2bf4914a388c250b5708c58f35ec8ccf` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-node-linux-s390x.tar.gz) | `6780f4b5898cfc3685abb46269349a065d28e91f1f29c993ebc7cd769949f675cc2655cfe9a0b0fc9f2e11bc833582bb48e3b52c1b7e3388fc63278155ebc143` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.11.5/kubernetes-node-windows-amd64.tar.gz) | `0e5cd07333516f53cf728d9223e82c02bf5a0aa83f45392bbf371a676d422b04cb9a388e255ce346886e75e2cd28331be0fb7ee4babd07ba14205d13ede18351` + +## Changelog since v1.11.4 + +### Other notable changes + +* CVE-2018-1002105: Fix critical security issue in kube-apiserver upgrade request proxy handler ([#71411](https://github.com/kubernetes/kubernetes/issues/71411), [@liggitt](https://github.com/liggitt)) +* IPVS proxier mode now support connection based graceful termination. ([#66012](https://github.com/kubernetes/kubernetes/pull/66012), [@Lion-Wei](https://github.com/Lion-Wei)) +* Update Cluster Autoscaler to 1.3.4 ([#70285](https://github.com/kubernetes/kubernetes/pull/70285), [@losipiuk](https://github.com/losipiuk)) +* Fix cluster autoscaler addon permissions so it can access batch/job. ([#69858](https://github.com/kubernetes/kubernetes/pull/69858), [@losipiuk](https://github.com/losipiuk)) +* Do not remove shutdown nodes from api-server for vSphere. ([#70291](https://github.com/kubernetes/kubernetes/pull/70291), [@gnufied](https://github.com/gnufied)) + * Fixes volume detach from shutdown nodes. 
+* Enable insertId generation, and update Stackdriver Logging Agent image to 0.5-1.5.36-1-k8s. This helps reduce log duplication and guarantee log order. ([#68920](https://github.com/kubernetes/kubernetes/pull/68920), [@qingling128](https://github.com/qingling128)) +* fix azure disk attachment error on Linux ([#70002](https://github.com/kubernetes/kubernetes/pull/70002), [@andyzhangx](https://github.com/andyzhangx)) +* GCE/GKE load balancer health check default interval changes from 2 seconds to 8 seconds, unhealthyThreshold to 3. ([#70099](https://github.com/kubernetes/kubernetes/pull/70099), [@grayluck](https://github.com/grayluck)) + * Health check parameters are configurable to be bigger than default values. + + + # v1.11.4 [Documentation](https://docs.k8s.io) @@ -734,30 +892,30 @@ controllerManagerExtraVolumes: * Support for the `alpha.kubernetes.io/nvidia-gpu` resource, which was deprecated in 1.10, has been removed. Please use the resource exposed by DevicePlugins instead (`nvidia.com/gpu`). ([#61498](https://github.com/kubernetes/kubernetes/pull/61498), [@mindprince](https://github.com/mindprince)) * The `kube-cloud-controller-manager` flag `--service-account-private-key-file` has been removed. Use `--use-service-account-credentials` instead. ([#60875](https://github.com/kubernetes/kubernetes/pull/60875), [@charrywanganthony](https://github.com/charrywanganthony)) * The rknetes code, which was deprecated in 1.10, has been removed. Use rktlet and CRI instead. ([#61432](https://github.com/kubernetes/kubernetes/pull/61432), [@filbranden](https://github.com/filbranden)) -* DaemonSet scheduling associated with the alpha ScheduleDaemonSetPods feature flag has been emoved. See https://github.com/kubernetes/features/issues/548 for feature status. ([#61411](https://github.com/kubernetes/kubernetes/pull/61411), [@liggitt](https://github.com/liggitt)) +* DaemonSet scheduling associated with the alpha ScheduleDaemonSetPods feature flag has been removed. 
See https://github.com/kubernetes/enhancements/issues/548 for feature status. ([#61411](https://github.com/kubernetes/kubernetes/pull/61411), [@liggitt](https://github.com/liggitt)) * The `METADATA_AGENT_VERSION` configuration option has been removed to keep metadata agent version consistent across Kubernetes deployments. ([#63000](https://github.com/kubernetes/kubernetes/pull/63000), [@kawych](https://github.com/kawych)) * The deprecated `--service-account-private-key-file` flag has been removed from the cloud-controller-manager. The flag is still present and supported in the kube-controller-manager. ([#65182](https://github.com/kubernetes/kubernetes/pull/65182), [@liggitt](https://github.com/liggitt)) * Removed alpha functionality that allowed the controller manager to approve kubelet server certificates. This functionality should be replaced by automating validation and approval of node server certificate signing requests. ([#62471](https://github.com/kubernetes/kubernetes/pull/62471), [@mikedanese](https://github.com/mikedanese)) #### Graduated to Stable/GA -* IPVS-based in-cluster load balancing is now GA ([ref](https://github.com/kubernetes/features/issues/265)) -* Enable CoreDNS as a DNS plugin for Kubernetes ([ref](https://github.com/kubernetes/features/issues/427)) +* IPVS-based in-cluster load balancing is now GA ([ref](https://github.com/kubernetes/enhancements/issues/265)) +* Enable CoreDNS as a DNS plugin for Kubernetes ([ref](https://github.com/kubernetes/enhancements/issues/427)) * Azure Go SDK is now GA ([#63063](https://github.com/kubernetes/kubernetes/pull/63063), [@feiskyer](https://github.com/feiskyer)) -* ClusterRole aggregation is now GA ([ref](https://github.com/kubernetes/features/issues/502)) -* CRI validation test suite is now GA ([ref](https://github.com/kubernetes/features/issues/292)) -* StorageObjectInUseProtection is now GA ([ref](https://github.com/kubernetes/features/issues/498)) and 
([ref](https://github.com/kubernetes/features/issues/499)) +* ClusterRole aggregation is now GA ([ref](https://github.com/kubernetes/enhancements/issues/502)) +* CRI validation test suite is now GA ([ref](https://github.com/kubernetes/enhancements/issues/292)) +* StorageObjectInUseProtection is now GA ([ref](https://github.com/kubernetes/enhancements/issues/498)) and ([ref](https://github.com/kubernetes/enhancements/issues/499)) #### Graduated to Beta -* Supporting out-of-tree/external cloud providers is now considered beta ([ref](https://github.com/kubernetes/features/issues/88)) -* Resizing PersistentVolumes after pod restart is now considered beta. ([ref](https://github.com/kubernetes/features/issues/284)) -* sysctl support is now considered beta ([ref](https://github.com/kubernetes/features/issues/34)) -* Support for Azure Virtual Machine Scale Sets is now considered beta. ([ref](https://github.com/kubernetes/features/issues/513)) -* Azure support for Cluster Autoscaler is now considered beta. ([ref](https://github.com/kubernetes/features/issues/514)) -* The ability to limit a node's access to the API is now considered beta. ([ref](https://github.com/kubernetes/features/issues/279)) -* CustomResource versioning is now considered beta. ([ref](https://github.com/kubernetes/features/issues/544)) -* Windows container configuration in CRI is now considered beta ([ref](https://github.com/kubernetes/features/issues/547)) -* CRI logging and stats are now considered beta ([ref](https://github.com/kubernetes/features/issues/552)) +* Supporting out-of-tree/external cloud providers is now considered beta ([ref](https://github.com/kubernetes/enhancements/issues/88)) +* Resizing PersistentVolumes after pod restart is now considered beta. ([ref](https://github.com/kubernetes/enhancements/issues/284)) +* sysctl support is now considered beta ([ref](https://github.com/kubernetes/enhancements/issues/34)) +* Support for Azure Virtual Machine Scale Sets is now considered beta. 
([ref](https://github.com/kubernetes/enhancements/issues/513)) +* Azure support for Cluster Autoscaler is now considered beta. ([ref](https://github.com/kubernetes/enhancements/issues/514)) +* The ability to limit a node's access to the API is now considered beta. ([ref](https://github.com/kubernetes/enhancements/issues/279)) +* CustomResource versioning is now considered beta. ([ref](https://github.com/kubernetes/enhancements/issues/544)) +* Windows container configuration in CRI is now considered beta ([ref](https://github.com/kubernetes/enhancements/issues/547)) +* CRI logging and stats are now considered beta ([ref](https://github.com/kubernetes/enhancements/issues/552)) * The dynamic Kubelet config feature is now beta, and the DynamicKubeletConfig feature gate is on by default. In order to use dynamic Kubelet config, ensure that the Kubelet's --dynamic-config-dir option is set. ([#64275](https://github.com/kubernetes/kubernetes/pull/64275), [@mtaufen](https://github.com/mtaufen)) * The Sysctls experimental feature has been promoted to beta (enabled by default via the `Sysctls` feature flag). PodSecurityPolicy and Pod objects now have fields for specifying and controlling sysctls. Alpha sysctl annotations will be ignored by 1.11+ kubelets. All alpha sysctl annotations in existing deployments must be converted to API fields to be effective. ([#6371](https://github.com/kubernetes/kubernetes/pull/63717), [@ingvagabund](https://github.com/ingvagabund)) * Volume expansion is now considered Beta. 
([#64288](https://github.com/kubernetes/kubernetes/pull/64288), [@gnufied](https://github.com/gnufied)) @@ -769,12 +927,12 @@ controllerManagerExtraVolumes: ### New alpha features -* kube-scheduler can now schedule DaemonSet pods ([ref](https://github.com/kubernetes/features/issues/548)) -* You can now resize PersistentVolumes without taking them offline ([ref](https://github.com/kubernetes/features/issues/531)) -* You can now set a maximum volume count ([ref](https://github.com/kubernetes/features/issues/554)) -* You can now do environment variable expansion in a subpath mount. ([ref](https://github.com/kubernetes/features/issues/559)) -* You can now run containers in a pod as a particular group. ([ref](https://github.com/kubernetes/features/issues/213)) -You can now bind tokens to service requests. ([ref](https://github.com/kubernetes/features/issues/542)) +* kube-scheduler can now schedule DaemonSet pods ([ref](https://github.com/kubernetes/enhancements/issues/548)) +* You can now resize PersistentVolumes without taking them offline ([ref](https://github.com/kubernetes/enhancements/issues/531)) +* You can now set a maximum volume count ([ref](https://github.com/kubernetes/enhancements/issues/554)) +* You can now do environment variable expansion in a subpath mount. ([ref](https://github.com/kubernetes/enhancements/issues/559)) +* You can now run containers in a pod as a particular group. ([ref](https://github.com/kubernetes/enhancements/issues/213)) +* You can now bind tokens to service requests. ([ref](https://github.com/kubernetes/enhancements/issues/542)) * The --experimental-qos-reserve kubelet flags has been replaced by the alpha level --qos-reserved flag or the QOSReserved field in the kubeletconfig, and requires the QOSReserved feature gate to be enabled. 
([#62509](https://github.com/kubernetes/kubernetes/pull/62509), [@sjenning](https://github.com/sjenning)) ## Other Notable Changes @@ -2110,7 +2268,7 @@ filename | sha256 hash * Implement preemption for extender with a verb and new interface ([#58717](https://github.com/kubernetes/kubernetes/pull/58717), [@resouer](https://github.com/resouer)) * `kube-cloud-controller-manager` flag `--service-account-private-key-file` is removed in v1.11 ([#60875](https://github.com/kubernetes/kubernetes/pull/60875), [@charrywanganthony](https://github.com/charrywanganthony)) * kubeadm: Add the writable boolean option to kubeadm config. The option works on a per-volume basis for *ExtraVolumes config keys. ([#60428](https://github.com/kubernetes/kubernetes/pull/60428), [@rosti](https://github.com/rosti)) -* DaemonSet scheduling associated with the alpha ScheduleDaemonSetPods feature flag has been removed from the 1.10 release. See https://github.com/kubernetes/features/issues/548 for feature status. ([#61411](https://github.com/kubernetes/kubernetes/pull/61411), [@liggitt](https://github.com/liggitt)) +* DaemonSet scheduling associated with the alpha ScheduleDaemonSetPods feature flag has been removed from the 1.10 release. See https://github.com/kubernetes/enhancements/issues/548 for feature status. ([#61411](https://github.com/kubernetes/kubernetes/pull/61411), [@liggitt](https://github.com/liggitt)) * Bugfix for erroneous upgrade needed messaging in kubernetes worker charm. ([#60873](https://github.com/kubernetes/kubernetes/pull/60873), [@wwwtyro](https://github.com/wwwtyro)) * Fix data race in node lifecycle controller ([#60831](https://github.com/kubernetes/kubernetes/pull/60831), [@resouer](https://github.com/resouer)) * Nodes are not deleted from kubernetes anymore if node is shutdown in Openstack. 
([#59931](https://github.com/kubernetes/kubernetes/pull/59931), [@zetaab](https://github.com/zetaab)) diff --git a/CHANGELOG-1.12.md b/CHANGELOG-1.12.md index 40e84d5a352..f9a56437215 100644 --- a/CHANGELOG-1.12.md +++ b/CHANGELOG-1.12.md @@ -1,23 +1,38 @@ -- [v1.12.2](#v1122) - - [Downloads for v1.12.2](#downloads-for-v1122) +- [v1.12.4](#v1124) + - [Downloads for v1.12.4](#downloads-for-v1124) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.12.1](#changelog-since-v1121) + - [Changelog since v1.12.3](#changelog-since-v1123) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes) -- [v1.12.1](#v1121) - - [Downloads for v1.12.1](#downloads-for-v1121) +- [v1.12.3](#v1123) + - [Downloads for v1.12.3](#downloads-for-v1123) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.12.0](#changelog-since-v1120) + - [Changelog since v1.12.2](#changelog-since-v1122) - [Other notable changes](#other-notable-changes-1) -- [v1.12.0](#v1120) - - [Downloads for v1.12.0](#downloads-for-v1120) +- [v1.12.2](#v1122) + - [Downloads for v1.12.2](#downloads-for-v1122) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) + - [Changelog since v1.12.1](#changelog-since-v1121) + - [Other notable changes](#other-notable-changes-2) +- [v1.12.1](#v1121) + - [Downloads for v1.12.1](#downloads-for-v1121) + - [Client Binaries](#client-binaries-3) + - [Server Binaries](#server-binaries-3) + - [Node Binaries](#node-binaries-3) + - [Changelog since v1.12.0](#changelog-since-v1120) + - [Other notable changes](#other-notable-changes-3) +- [v1.12.0](#v1120) + - [Downloads for v1.12.0](#downloads-for-v1120) + - [Client Binaries](#client-binaries-4) + - [Server Binaries](#server-binaries-4) + - [Node Binaries](#node-binaries-4) - [Known 
Issues](#known-issues) - [Major Themes](#major-themes) - [SIG API Machinery](#sig-api-machinery) @@ -35,11 +50,11 @@ - [SIG-storage](#sig-storage) - [SIG-vmware](#sig-vmware) - [SIG-windows](#sig-windows) - - [Action Required](#action-required) + - [Action Required](#action-required-1) - [Deprecations and removals](#deprecations-and-removals) - [New Features](#new-features) - [API Changes](#api-changes) - - [Other Notable Changes](#other-notable-changes-2) + - [Other Notable Changes](#other-notable-changes-4) - [SIG API Machinery](#sig-api-machinery-1) - [SIG Apps](#sig-apps) - [SIG Auth](#sig-auth) @@ -58,54 +73,204 @@ - [SIG Storage](#sig-storage-1) - [SIG VMWare](#sig-vmware-1) - [SIG Windows](#sig-windows-1) - - [Other Notable Changes](#other-notable-changes-3) + - [Other Notable Changes](#other-notable-changes-5) - [Bug Fixes](#bug-fixes) - [Not Very Notable (that is, non-user-facing)](#not-very-notable-that-is-non-user-facing) - [External Dependencies](#external-dependencies) - [v1.12.0-rc.2](#v1120-rc2) - [Downloads for v1.12.0-rc.2](#downloads-for-v1120-rc2) - - [Client Binaries](#client-binaries-3) - - [Server Binaries](#server-binaries-3) - - [Node Binaries](#node-binaries-3) - - [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1) - - [Other notable changes](#other-notable-changes-4) -- [v1.12.0-rc.1](#v1120-rc1) - - [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1) - - [Client Binaries](#client-binaries-4) - - [Server Binaries](#server-binaries-4) - - [Node Binaries](#node-binaries-4) - - [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2) - - [Action Required](#action-required-1) - - [Other notable changes](#other-notable-changes-5) -- [v1.12.0-beta.2](#v1120-beta2) - - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) - - [Action 
Required](#action-required-2) + - [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1) - [Other notable changes](#other-notable-changes-6) -- [v1.12.0-beta.1](#v1120-beta1) - - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) +- [v1.12.0-rc.1](#v1120-rc1) + - [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) - - [Action Required](#action-required-3) + - [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2) + - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-7) -- [v1.12.0-alpha.1](#v1120-alpha1) - - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) +- [v1.12.0-beta.2](#v1120-beta2) + - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - - [Changelog since v1.11.0](#changelog-since-v1110) - - [Action Required](#action-required-4) + - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-8) +- [v1.12.0-beta.1](#v1120-beta1) + - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) + - [Client Binaries](#client-binaries-8) + - [Server Binaries](#server-binaries-8) + - [Node Binaries](#node-binaries-8) + - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) + - [Action Required](#action-required-4) + - [Other notable changes](#other-notable-changes-9) +- [v1.12.0-alpha.1](#v1120-alpha1) + - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) + - [Client Binaries](#client-binaries-9) + - [Server Binaries](#server-binaries-9) + - [Node Binaries](#node-binaries-9) + - [Changelog since v1.11.0](#changelog-since-v1110) + - [Action Required](#action-required-5) + - 
[Other notable changes](#other-notable-changes-10) +# v1.12.4 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.12.4 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes.tar.gz) | `35fd7a207cf3b6a5d569b1aad2fbccaf82ae394e6c91d3b1861b9e73b5069ca83aee8d5cdaa2e65f727579124d94ca9e886d0f4439f37b3204a1ac51db930cb0` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-src.tar.gz) | `ec7a67dfd82b0e8dd5020ebd3f059c38bb751bbb868b91410516cdde260f5a768ce4237a272c78f8a6b3fab6e1f4d13a7d3b88da7cdd81cc93abfcf8f5da6121` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-client-darwin-386.tar.gz) | `96568f7d8f9583565345841b56c14ede552028d43b9425ed59bac343fab54e4a522d435b31e62e7cfa877e323b78c54a161b6f9ecd5ba200887a0ab7bafe472d` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-client-darwin-amd64.tar.gz) | `ba9c3d8186da6c4d41e1cfd19b0b9e317bba9b41124a46d6fb6820bd564059bca5d480639e3b2c415dd0d40817e56a050db8949ffeaf23e1a79a62ab4b907eeb` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-client-linux-386.tar.gz) | `4c90bf1f267545ae835d2c098829b15a4b093be8b6212bd30e4f9eade6bd25334ec54638f5b8917a19543438ccce55a211adec47ed7f3d779092ab2de5c5633e` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-client-linux-amd64.tar.gz) | `68edc349340f94d30c44f190eaf894d0df5dc6cda6e875335d7b34ea02febe05efaf8239cbf3a3153914ad9cc3c771a94aec521c75b1b5665bdd3d7741977ab3` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-client-linux-arm.tar.gz) | `a18faf32226c95dc864b5f4d728eb1d9bf84ee972fd690555c63c92f56f42c6e0dc65d60b4d59f9d549b3176af6ad3eece478d999dd2a6a64f116c12c3ae018c` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-client-linux-arm64.tar.gz) | 
`ca70191ee9801de4721a367a7bf751b6b3b0347c48d8fddc1c898a87ed345c7bf32d3237123a4ab3776b619a383e7cd534b9a8a02be4bb00da781bb3272b5713` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-client-linux-ppc64le.tar.gz) | `4a9c55bc8c63a0a4ee606dcc80736af8504f194a92a737684cb8f8c65cf9d7ca3663b0dccb934ff48a7f7fa98c0d3225eb85930d09e089122a64bc81a221a46d` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-client-linux-s390x.tar.gz) | `088f9cee592db55de3cc7c382dca7eb118670952a68d5f91e7722e1c4a3f81e7c5b0cb800f805ba15d0d444864f4b8c9788d5b98f4ab94c7c53a065296d09182` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-client-windows-386.tar.gz) | `56f52a924d212e01c04ebf57163c8cb709e65d87cce5131bb2766f876f5cd82ab757108e75d8ac7c64fb0b088d86feb8fbdc5da42fc47a2fe06ee9f42c276631` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-client-windows-amd64.tar.gz) | `d2e3f2976cce4779539ea5ce09a3745293cfa63b872d449ae49a0b811b202a7223223d9fa16f91dc5901aab0a84ac3dd1eb05beb632000833aa7c612e243f3cd` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-server-linux-amd64.tar.gz) | `d5b4a448c03800c146a8abe924349fa5bb3ee4b1215f2c154171812325d056f488269bd29a8ff25dd3a16ca7a1f8804f5dcea141d82e7a1d991b778f84fa5bc5` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-server-linux-arm.tar.gz) | `e1a3f1e6d8b20482a1bfd37081f45758022930e13bf13f8a06e4f65fb9fc01d138511e5a4368bbdef342744f9d9db4e892149f94ad9481ee147e98a319e26acc` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-server-linux-arm64.tar.gz) | `d10b4b6e4ccb6652a6b64b0b32751a7ed151baf8054fcad8c9ff8e477d0c4a4a8386f2d433462570f78b5750ef0c3e641896cd542391d6928566958a12f6d0c7` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-server-linux-ppc64le.tar.gz) | 
`7076feb091b9b9c01af7bb3a23cdd8b7a4716c7da454029551dc6d2862813b4b5460ea70e4ba0a604ede3ea2f4fa012e12f97adbaf31938e74d063e797aaff0a` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-server-linux-s390x.tar.gz) | `5d2faa0309f0d6ed88f242ce621588cd6fb3c2c7e87230b5452cda8c86b649407c0dc7ef2ad0ef78f63aade7054edaac4656ee31560a0feceeca5991e44a11b4` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-node-linux-amd64.tar.gz) | `060304784430f3200c337edbd63dd46e04b2fa32cbb82a3f39e020ce157b6c3634e2ca5817f8e4f26c7f498086e20e173625a73093ee6f0262d36dfb19238eb9` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-node-linux-arm.tar.gz) | `a4bb2548075a04decec14d672921ac99ff90afbae3bc74ccdc778fdb15087ceef3b9a691871fccda36aae078181fd13f9dce1c8d82e1f40991354266006d37ee` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-node-linux-arm64.tar.gz) | `a9e8d364f55d63c3b42f3441b64a1995d5820886d0963389fa086b5083db30a0bd7412e12ce49d6d777587a983d6acd2b66888bd202228fef8b7efb4a4b9da9c` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-node-linux-ppc64le.tar.gz) | `4aed63ade9f735439a11e87ffeadd2bb41302d07fe4e5168cb2fb2b241b2cf51a9015ad181426ca8b94fcda3878a7a896f23f6c8468cc645828c00f1f81aa5e0` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-node-linux-s390x.tar.gz) | `bd64af61473b1e1cee99df44455ee588a5fbb5a1348d3e17af60673024d5591dece97fde5c546fdd58222888665d0d6fb2a22cd56d8bf8ca371a6aa6780583c1` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.4/kubernetes-node-windows-amd64.tar.gz) | `5944e1fcf835464d1efb1c7e21b11c1701a4b236cd2be176139d5cadfbaf91840de674c5e40470ceb3c82139950b4dccb95cd39abae89deaa9d8ab96d696d8b0` + +## Changelog since v1.12.3 + +### Action Required + +* ACTION REQUIRED: The Node.Status.Volumes.Attached.DevicePath fields is deprecated for CSI volumes 
and will be unset in a future release ([#71095](https://github.com/kubernetes/kubernetes/pull/71095), [@msau42](https://github.com/msau42)) + +### Other notable changes + +* fix kubelet log flushing issue in azure disk ([#71990](https://github.com/kubernetes/kubernetes/pull/71990), [@andyzhangx](https://github.com/andyzhangx)) +* Disable proxy to loopback and linklocal ([#71980](https://github.com/kubernetes/kubernetes/pull/71980), [@micahhausler](https://github.com/micahhausler)) +* fix issue: vm sku restriction policy does not work in azure disk attach/detach ([#71941](https://github.com/kubernetes/kubernetes/pull/71941), [@andyzhangx](https://github.com/andyzhangx)) +* Scheduler only activates unschedulable pods if node's scheduling related properties change. ([#71551](https://github.com/kubernetes/kubernetes/pull/71551), [@mlmhl](https://github.com/mlmhl)) +* UDP connections now support graceful termination in IPVS mode ([#71515](https://github.com/kubernetes/kubernetes/pull/71515), [@lbernail](https://github.com/lbernail)) +* Fixes an issue where Azure VMSS instances not existing in Azure were not being deleted by the Cloud Controller Manager. ([#71597](https://github.com/kubernetes/kubernetes/pull/71597), [@marc-sensenich](https://github.com/marc-sensenich)) +* Include CRD for BGPConfigurations, needed for calico 2.x to 3.x upgrade. ([#71868](https://github.com/kubernetes/kubernetes/pull/71868), [@satyasm](https://github.com/satyasm)) +* On GCI, NPD starts to monitor kubelet, docker, containerd crashlooping, read-only filesystem and corrupt docker overlay2 issues. ([#71522](https://github.com/kubernetes/kubernetes/pull/71522), [@wangzhen127](https://github.com/wangzhen127)) +* Only use the first IP address retrieved from instance metadata. This is because Azure CNI would set up a list of IP addresses in instance metadata, while only the first one is the Node's IP. 
([#71736](https://github.com/kubernetes/kubernetes/pull/71736), [@feiskyer](https://github.com/feiskyer)) +* kube-controller-manager: fixed issue displaying help for the deprecated insecure --port flag ([#71601](https://github.com/kubernetes/kubernetes/pull/71601), [@liggitt](https://github.com/liggitt)) +* Fixes apiserver nil pointer panics when requesting v2beta1 autoscaling object metrics ([#71744](https://github.com/kubernetes/kubernetes/pull/71744), [@yue9944882](https://github.com/yue9944882)) +* Fix a potential bug that scheduler preempts unnecessary pods. ([#70898](https://github.com/kubernetes/kubernetes/pull/70898), [@Huang-Wei](https://github.com/Huang-Wei)) +* The kube-apiserver's healthz now takes in an optional query parameter which allows you to disable health checks from causing healthz failures. ([#70676](https://github.com/kubernetes/kubernetes/pull/70676), [@logicalhan](https://github.com/logicalhan)) +* Fix scheduling starvation of pods in cluster with large number of unschedulable pods. ([#71488](https://github.com/kubernetes/kubernetes/pull/71488), [@bsalamat](https://github.com/bsalamat)) +* Upgrade Stackdriver Logging Agent addon image to 0.6-1.6.0-1 to use Fluentd v1.2. This provides nanoseconds timestamp granularity for logs. ([#70954](https://github.com/kubernetes/kubernetes/pull/70954), [@qingling128](https://github.com/qingling128)) +* fix detach azure disk issue due to dirty cache ([#71495](https://github.com/kubernetes/kubernetes/pull/71495), [@andyzhangx](https://github.com/andyzhangx)) +* Fixes ability for admin/edit/view users to see controller revisions, needed for kubectl rollout commands ([#70699](https://github.com/kubernetes/kubernetes/pull/70699), [@liggitt](https://github.com/liggitt)) +* Upgrade golang.org/x/net image to release-branch.go1.10 ([#70663](https://github.com/kubernetes/kubernetes/pull/70663), [@wenjiaswe](https://github.com/wenjiaswe)) +* [GCE] Filter out spammy audit logs from cluster autoscaler. 
([#70696](https://github.com/kubernetes/kubernetes/pull/70696), [@loburm](https://github.com/loburm)) +* Correctly default Audience in the kubelet for TokenRequestProjections. ([#71007](https://github.com/kubernetes/kubernetes/pull/71007), [@mikedanese](https://github.com/mikedanese)) +* fix azure disk attach/detach failed forever issue ([#71377](https://github.com/kubernetes/kubernetes/pull/71377), [@andyzhangx](https://github.com/andyzhangx)) +* Fix a scheduler panic due to internal cache inconsistency ([#71063](https://github.com/kubernetes/kubernetes/pull/71063), [@Huang-Wei](https://github.com/Huang-Wei)) +* apiserver: fixes handling and logging of panics in REST handlers to prevent crashes ([#71076](https://github.com/kubernetes/kubernetes/pull/71076), [@liggitt](https://github.com/liggitt)) +* Fixes an issue with stuck connections handling error responses ([#71419](https://github.com/kubernetes/kubernetes/pull/71419), [@liggitt](https://github.com/liggitt)) + + + +# v1.12.3 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.12.3 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes.tar.gz) | `f4bad1ae3632c715dd4be50e960faba890307e2c8e906edd59389d69a2352b58f093b554b5830de0583214a4efaeee8e6d3e3860fe8f39d4c1ba30927bee9009` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-src.tar.gz) | `30d8367049e71241336e11e018948bd7ad90cf27ff1007b8132b4f928284ae778e708d61b641e8bf499b8fa13e825be2865193343a982f125e00fbf055746724` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-client-darwin-386.tar.gz) | `90991e3c3aa72dd9b33fc9f7ba2b986f126fb1f547f25edb8cc1e561da76c3d1c2e988c93ad470bc2ced520e447d961af67d71caba2578ead23135362cabc78f` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-client-darwin-amd64.tar.gz) | 
`bbbfaedfc043cf8be809af52901ab31721a90e6234834cda7e874266f3e2f47028cd143b18e7248aaaf60d431fe11901913f8236b098c4f0dc6616f0569dc604` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-client-linux-386.tar.gz) | `15a5c37e9deffabe4c35bede095d9df02a59ab4b29a94042ee03122bde8b20faabd7d24644a99085473772e2073ebb9070987295b1ae476743b447f767031c29` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-client-linux-amd64.tar.gz) | `277f6f6420b7554ddc099eb0d31d11240d71acce906cf5f1214881f26662012c1ec0d8e50ad07b9c3e8f40d35b308ca732f096c022a75eb2b81f90cbc4f39d44` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-client-linux-arm.tar.gz) | `8e58ec3aa8e9b6ea38fbc075dee0e90e36d48aa567d4459175ed223d1c907d4e433ef5bc292416d8c13841114fff4d95cbd478401b308527461aca545a94867c` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-client-linux-arm64.tar.gz) | `10144f52577d0a83f0b26fe6c5e299777a2bcbcb022a53d4d0ad95e25d1604b09512b537a3dab7e967f00c11e8a96de24b9ffcb57f5608fff9cd35d04fa9e9cc` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-client-linux-ppc64le.tar.gz) | `db67e83bfa51346c7f8de2cf6ca3b90add6b3e766086e81ec7ea3dc9491b6db7dbcfc9522e187672f6b38954cb8cedc600c11adb711e047115bfe0706a83cdb3` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-client-linux-s390x.tar.gz) | `93f9c886e0dffd021da14a83e9047a8276f4db51c51b460743e22f64f0ab58a6e8f508849fcff15a2eef2bbb75e352fdce280d7a2ba3b28ab25dd6d9dba2ef51` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-client-windows-386.tar.gz) | `d523e5c950f53213db4544e0491444ed749deec93749fbfdf02b68d6e9bb84b015020917a6f11e4836ebfd85e8d0ef1634509fc8ba51bdc1d7eddeea4e0ab02b` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-client-windows-amd64.tar.gz) | 
`1be6053d44b91dd4cb24acb584487a26321e1806573c793177569879f4a165fa3daa491ac3bf91f49be602ef5811415b131e56277ed7862894b7681d635446be` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-server-linux-amd64.tar.gz) | `13363365457ff7527f92c4d371b6dedc6e77553f596e694bb8479511fded9ab8694ae4540752268f82acbc3485606bd07042e3677da45156c850cc9e75bbd2a1` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-server-linux-arm.tar.gz) | `b73846110f47fb5bbb9861bc4bd9bfc12be1e1d2306426044e0b08e288d3f512ed1c4bec0e8e3d2d009cf92f94b5c0642d663d7d70055e47d2d1b1710674bbcf` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-server-linux-arm64.tar.gz) | `88ce03ba915a05f64ba56b5e1fc8feb02c1dc9b2c5244e794c7bde7d32ccf55933337298680c9d57c8f481910dc6e052fe340a241374a2989c1db8e503227210` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-server-linux-ppc64le.tar.gz) | `82274a9a2a151ab8262fe729b13828b32737bdd579ee2411c4fb3618f40be899b0132029ccc99accc3cd7527106d9edaa4f7dd457e93a9d29ba84a330fb7d352` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-server-linux-s390x.tar.gz) | `215e1102f1310bc3125e6ffba3db3730c817e0aae31d69189d9522de77563e4c8341a4fee6135788db549753c3b194eb08df52836e26169a1c1d3733e308eaa7` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-node-linux-amd64.tar.gz) | `09441ec75f0aba7f537238cfdd7ec2790f52f383b730d6ecf2f40ff11d0ad7aba084a89976fe3addf71a4668439fce3903ba546a46608e366e720130fc81cca2` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-node-linux-arm.tar.gz) | `3ae8c3f223175022211185df9fe27d3b910bb3669df312a5413379dfafe9870f190857a2639ab958ed18278f0323fd7c6881c87f95f08b078c066e77aadc0372` 
+[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-node-linux-arm64.tar.gz) | `ab8ad3e4994aa603a68ee4312f444e586c0d547e22ff2d45725b1a72d9f417812bf573e241715a4975f50e20c252b34abb85fc75f8f4ca8f8b030ed9ed21f21f` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-node-linux-ppc64le.tar.gz) | `153ba602142730bbbc81c96b20cbf03b9ce6e746922a655b27952b30326de90756489389c894244abdc9d707ccb364f0c4d7cc4d92acdeedf2cfd3fdce1e9f89` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-node-linux-s390x.tar.gz) | `8a5a1794d0ac82a5351abbddc815eff458514f1f023d0d0a5919891ce79fdd8474b637cb04e5600ccedcba67cfd60c27efe1a83ee4525ca3e98b65d04ffd3f55` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.3/kubernetes-node-windows-amd64.tar.gz) | `90458d17d494d95f4e914c897f5d6bffcea1279bd5600a944f3277baf5d50ab7ce354cc5bd5000c885489a3950e3b3cf0fc07c9bbbe178d6df05bc4b0afdab2a` + +## Changelog since v1.12.2 + +### Other notable changes + +* CVE-2018-1002105: Fix critical security issue in kube-apiserver upgrade request proxy handler ([#71411](https://github.com/kubernetes/kubernetes/issues/71411), [@liggitt](https://github.com/liggitt)) +* remove retry operation on attach/detach azure disk ([#70568](https://github.com/kubernetes/kubernetes/pull/70568), [@andyzhangx](https://github.com/andyzhangx)) +* Fix CSI volume limits not showing up in node's capacity and allocatable ([#70540](https://github.com/kubernetes/kubernetes/pull/70540), [@gnufied](https://github.com/gnufied)) +* kubeadm: fix a panic when calling "alpha phase certs renew all --use-api=false" ([#70768](https://github.com/kubernetes/kubernetes/pull/70768), [@neolit123](https://github.com/neolit123)) +* Update Cluster Autoscaler to 1.12.1 ([#70705](https://github.com/kubernetes/kubernetes/pull/70705), [@losipiuk](https://github.com/losipiuk)) +* Improve Azure instance metadata handling by adding caches. 
([#70353](https://github.com/kubernetes/kubernetes/pull/70353), [@feiskyer](https://github.com/feiskyer)) +* Ensure orphan public IPs on Azure deleted when service recreated with the same name. ([#70463](https://github.com/kubernetes/kubernetes/pull/70463), [@feiskyer](https://github.com/feiskyer)) +* fix azure disk attachment error on Linux ([#70002](https://github.com/kubernetes/kubernetes/pull/70002), [@andyzhangx](https://github.com/andyzhangx)) +* Fix cloud-controller-manager crash when using OpenStack provider and PersistentVolume initializing controller ([#70459](https://github.com/kubernetes/kubernetes/pull/70459), [@mvladev](https://github.com/mvladev)) +* Corrects check for non-Azure managed nodes with the Azure cloud provider ([#70135](https://github.com/kubernetes/kubernetes/pull/70135), [@marc-sensenich](https://github.com/marc-sensenich)) +* GCE/GKE load balancer health check default interval changes from 2 seconds to 8 seconds, unhealthyThreshold to 3. ([#70099](https://github.com/kubernetes/kubernetes/pull/70099), [@grayluck](https://github.com/grayluck)) + * Health check parameters are configurable to be bigger than default values. + + + # v1.12.2 [Documentation](https://docs.k8s.io) @@ -306,7 +471,7 @@ filename | sha512 hash ## Known Issues -- Feature [#566](https://github.com/kubernetes/kubernetes/issues/566) enabling CoreDNS as the default for kube-up deployments was dropped from the release due to a scalability memory resource consumption issue observed. If a cluster operator is considering using CoreDNS on a cluster greater than 2000 nodes, it may be necessary to give more consideration to CoreDNS pod memory resource limits and experimentally measure that memory usage versus cluster resource availability. +- Feature [#566](https://github.com/kubernetes/enhancements/issues/566) enabling CoreDNS as the default for kube-up deployments was dropped from the release due to a scalability memory resource consumption issue observed. 
If a cluster operator is considering using CoreDNS on a cluster greater than 2000 nodes, it may be necessary to give more consideration to CoreDNS pod memory resource limits and experimentally measure that memory usage versus cluster resource availability. - kube-controller-manager currently needs a writable `--cert-dir` (default is `/var/run/kubernetes`) for generating self-signed certificates, when no `--tls-cert-file` or `--tls-private-key-file` are provided. - The `system:kube-controller-manager` ClusterRole lacks permission to `get` the `configmap` extension-apiserver-authentication. kube-controller-manager errors if run with a service account bound to the clusterrole. - Runtime handler and Windows npipe protocol are not supported yet in crictl v1.11.x. Those features will be supported in crictl [v1.12.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.12.0), together with Kubernetes v1.12.1. @@ -444,13 +609,13 @@ SIG Scheduling development efforts have been primarily focused on improving perf ### SIG-storage -SIG Storage promoted the [Kubernetes volume topology feature](https://github.com/kubernetes/features/issues/490) to beta. This enables Kubernetes to understand and act intelligently on volume accessibility information (such as the “zone” a cloud volume is provisioned in, the “rack” that a SAN array is accessible from, and so on). +SIG Storage promoted the [Kubernetes volume topology feature](https://github.com/kubernetes/enhancements/issues/490) to beta. This enables Kubernetes to understand and act intelligently on volume accessibility information (such as the “zone” a cloud volume is provisioned in, the “rack” that a SAN array is accessible from, and so on). -The [dynamic maximum volume count](https://github.com/kubernetes/features/issues/554) feature was also moved to beta. 
This enables a volume plugin to specify the maximum number of a given volume type per node as a function of the node characteristics (for example, a larger limit for larger nodes, a smaller limit for smaller nodes). +The [dynamic maximum volume count](https://github.com/kubernetes/enhancements/issues/554) feature was also moved to beta. This enables a volume plugin to specify the maximum number of a given volume type per node as a function of the node characteristics (for example, a larger limit for larger nodes, a smaller limit for smaller nodes). -SIG Storage also worked on a number of [Container Storage Interface (CSI) features](https://github.com/kubernetes/features/issues/178) this quarter in anticipation of moving support for CSI from beta to GA in the next Kubernetes release. This includes graduating the dependent “mount namespace propagation” feature to GA, moving the Kubelet plugin registration mechanism to beta, adding alpha support for a new CSI driver registry as well as for topology, and adding a number of alpha features to support the use of CSI for “local ephemeral volumes” (that is, volumes that exist for the lifecycle of a pod and contain some injected information, like a token or secret). +SIG Storage also worked on a number of [Container Storage Interface (CSI) features](https://github.com/kubernetes/enhancements/issues/178) this quarter in anticipation of moving support for CSI from beta to GA in the next Kubernetes release. This includes graduating the dependent “mount namespace propagation” feature to GA, moving the Kubelet plugin registration mechanism to beta, adding alpha support for a new CSI driver registry as well as for topology, and adding a number of alpha features to support the use of CSI for “local ephemeral volumes” (that is, volumes that exist for the lifecycle of a pod and contain some injected information, like a token or secret). 
-With Kubernetes v1.12, SIG Storage also introduced alpha support for [volume snapshotting](https://github.com/kubernetes/features/issues/177). This feature introduces the ability to create/delete volume snapshots and create new volumes from a snapshot using the Kubernetes API. +With Kubernetes v1.12, SIG Storage also introduced alpha support for [volume snapshotting](https://github.com/kubernetes/enhancements/issues/177). This feature introduces the ability to create/delete volume snapshots and create new volumes from a snapshot using the Kubernetes API. ### SIG-vmware @@ -1514,7 +1679,7 @@ filename | sha256 hash * The `system-node-critical` and `system-cluster-critical` priority classes are now limited to the `kube-system` namespace by the `PodPriority` admission plugin. ([#65593](https://github.com/kubernetes/kubernetes/pull/65593), [@bsalamat](https://github.com/bsalamat)) * kubernetes-worker juju charm: Added support for setting the --enable-ssl-chain-completion option on the ingress proxy. "action required": if your installation relies on supplying incomplete certificate chains and using OCSP to fill them in, you must set "ingress-ssl-chain-completion" to "true" in your juju configuration. ([#63845](https://github.com/kubernetes/kubernetes/pull/63845), [@paulgear](https://github.com/paulgear)) * In anticipation of CSI 1.0 in the next release, Kubernetes 1.12 calls the CSI `NodeGetInfo` RPC instead of `NodeGetId` RPC. Ensure your CSI Driver implements `NodeGetInfo(...)` before upgrading to 1.12. [@saad-ali](https://github.com/kubernetes/kubernetes/issues/68688) -* Kubernetes 1.12 also enables [Kubelet device plugin registration](https://github.com/kubernetes/features/issues/595) feature by default. 
Before upgrading to 1.12, ensure the `driver-registrar` CSI sidecar container for your CSI driver is configured to handle plugin registration (set the `--kubelet-registration-path` parameter on `driver-registrar` to expose a new unix domain socket to handle Kubelet Plugin Registration). +* Kubernetes 1.12 also enables [Kubelet device plugin registration](https://github.com/kubernetes/enhancements/issues/595) feature by default. Before upgrading to 1.12, ensure the `driver-registrar` CSI sidecar container for your CSI driver is configured to handle plugin registration (set the `--kubelet-registration-path` parameter on `driver-registrar` to expose a new unix domain socket to handle Kubelet Plugin Registration). ### Other notable changes @@ -1680,7 +1845,7 @@ filename | sha256 hash * Added block volume support to Cinder volume plugin. ([#64879](https://github.com/kubernetes/kubernetes/pull/64879), [@bertinatto](https://github.com/bertinatto)) * fixed incorrect OpenAPI schema for CustomResourceDefinition objects ([#65256](https://github.com/kubernetes/kubernetes/pull/65256), [@liggitt](https://github.com/liggitt)) * ignore not found file error when watching manifests ([#64880](https://github.com/kubernetes/kubernetes/pull/64880), [@dixudx](https://github.com/dixudx)) -* add port-forward examples for sevice ([#64773](https://github.com/kubernetes/kubernetes/pull/64773), [@MasayaAoyama](https://github.com/MasayaAoyama)) +* add port-forward examples for service ([#64773](https://github.com/kubernetes/kubernetes/pull/64773), [@MasayaAoyama](https://github.com/MasayaAoyama)) * Fix issues for block device not mapped to container. ([#64555](https://github.com/kubernetes/kubernetes/pull/64555), [@wenlxie](https://github.com/wenlxie)) * Update crictl on GCE to v1.11.0. 
([#65254](https://github.com/kubernetes/kubernetes/pull/65254), [@Random-Liu](https://github.com/Random-Liu)) * Fixes missing nodes lines when kubectl top nodes ([#64389](https://github.com/kubernetes/kubernetes/pull/64389), [@yue9944882](https://github.com/yue9944882)) diff --git a/CHANGELOG-1.13.md b/CHANGELOG-1.13.md index c46578637dd..900c86e4034 100644 --- a/CHANGELOG-1.13.md +++ b/CHANGELOG-1.13.md @@ -1,39 +1,883 @@ -- [v1.13.0-beta.1](#v1130-beta1) - - [Downloads for v1.13.0-beta.1](#downloads-for-v1130-beta1) +- [v1.13.1](#v1131) + - [Downloads for v1.13.1](#downloads-for-v1131) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.13.0-alpha.3](#changelog-since-v1130-alpha3) - - [Action Required](#action-required) + - [Changelog since v1.13.0](#changelog-since-v1130) - [Other notable changes](#other-notable-changes) -- [v1.13.0-alpha.3](#v1130-alpha3) - - [Downloads for v1.13.0-alpha.3](#downloads-for-v1130-alpha3) +- [v1.13.0](#v1130) + - [Downloads for v1.13.0](#downloads-for-v1130) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.13.0-alpha.2](#changelog-since-v1130-alpha2) - - [Other notable changes](#other-notable-changes-1) -- [v1.13.0-alpha.2](#v1130-alpha2) - - [Downloads for v1.13.0-alpha.2](#downloads-for-v1130-alpha2) +- [Kubernetes 1.13 Release Notes](#kubernetes-113-release-notes) + - [Security Content](#security-content) + - [Urgent Upgrade Notes](#urgent-upgrade-notes) + - [(No, really, you MUST do this before you upgrade)](#no-really-you-must-do-this-before-you-upgrade) + - [Known Issues](#known-issues) + - [Deprecations](#deprecations) + - [Major Themes](#major-themes) + - [SIG API Machinery](#sig-api-machinery) + - [SIG Auth](#sig-auth) + - [SIG AWS](#sig-aws) + - [SIG Azure](#sig-azure) + - [SIG Big Data](#sig-big-data) + - [SIG CLI](#sig-cli) + - [SIG Cloud 
Provider](#sig-cloud-provider) + - [SIG Cluster Lifecycle](#sig-cluster-lifecycle) + - [SIG IBM Cloud](#sig-ibm-cloud) + - [SIG Multicluster](#sig-multicluster) + - [SIG Network](#sig-network) + - [SIG Node](#sig-node) + - [SIG Openstack](#sig-openstack) + - [SIG Scalability](#sig-scalability) + - [SIG Scheduling](#sig-scheduling) + - [SIG Service Catalog](#sig-service-catalog) + - [SIG Storage](#sig-storage) + - [SIG UI](#sig-ui) + - [SIG VMWare](#sig-vmware) + - [SIG Windows](#sig-windows) + - [New Features](#new-features) + - [Release Notes From SIGs](#release-notes-from-sigs) + - [SIG API Machinery](#sig-api-machinery-1) + - [SIG Auth](#sig-auth-1) + - [SIG Autoscaling](#sig-autoscaling) + - [SIG AWS](#sig-aws-1) + - [SIG Azure](#sig-azure-1) + - [SIG CLI](#sig-cli-1) + - [SIG Cloud Provider](#sig-cloud-provider-1) + - [SIG Cluster Lifecycle](#sig-cluster-lifecycle-1) + - [SIG GCP](#sig-gcp) + - [SIG Network](#sig-network-1) + - [SIG Node](#sig-node-1) + - [SIG OpenStack](#sig-openstack-1) + - [SIG Release](#sig-release) + - [SIG Scheduling](#sig-scheduling-1) + - [SIG Storage](#sig-storage-1) + - [SIG Windows](#sig-windows-1) + - [External Dependencies](#external-dependencies) +- [v1.13.0-rc.2](#v1130-rc2) + - [Downloads for v1.13.0-rc.2](#downloads-for-v1130-rc2) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.13.0-alpha.1](#changelog-since-v1130-alpha1) - - [Other notable changes](#other-notable-changes-2) -- [v1.13.0-alpha.1](#v1130-alpha1) - - [Downloads for v1.13.0-alpha.1](#downloads-for-v1130-alpha1) + - [Changelog since v1.13.0-rc.1](#changelog-since-v1130-rc1) + - [Other notable changes](#other-notable-changes-1) +- [v1.13.0-rc.1](#v1130-rc1) + - [Downloads for v1.13.0-rc.1](#downloads-for-v1130-rc1) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) + - [Changelog since 
v1.13.0-beta.2](#changelog-since-v1130-beta2) + - [Other notable changes](#other-notable-changes-2) +- [v1.13.0-beta.2](#v1130-beta2) + - [Downloads for v1.13.0-beta.2](#downloads-for-v1130-beta2) + - [Client Binaries](#client-binaries-4) + - [Server Binaries](#server-binaries-4) + - [Node Binaries](#node-binaries-4) + - [Changelog since v1.13.0-beta.1](#changelog-since-v1130-beta1) + - [Other notable changes](#other-notable-changes-3) +- [v1.13.0-beta.1](#v1130-beta1) + - [Downloads for v1.13.0-beta.1](#downloads-for-v1130-beta1) + - [Client Binaries](#client-binaries-5) + - [Server Binaries](#server-binaries-5) + - [Node Binaries](#node-binaries-5) + - [Changelog since v1.13.0-alpha.3](#changelog-since-v1130-alpha3) + - [Action Required](#action-required) + - [Other notable changes](#other-notable-changes-4) +- [v1.13.0-alpha.3](#v1130-alpha3) + - [Downloads for v1.13.0-alpha.3](#downloads-for-v1130-alpha3) + - [Client Binaries](#client-binaries-6) + - [Server Binaries](#server-binaries-6) + - [Node Binaries](#node-binaries-6) + - [Changelog since v1.13.0-alpha.2](#changelog-since-v1130-alpha2) + - [Other notable changes](#other-notable-changes-5) +- [v1.13.0-alpha.2](#v1130-alpha2) + - [Downloads for v1.13.0-alpha.2](#downloads-for-v1130-alpha2) + - [Client Binaries](#client-binaries-7) + - [Server Binaries](#server-binaries-7) + - [Node Binaries](#node-binaries-7) + - [Changelog since v1.13.0-alpha.1](#changelog-since-v1130-alpha1) + - [Other notable changes](#other-notable-changes-6) +- [v1.13.0-alpha.1](#v1130-alpha1) + - [Downloads for v1.13.0-alpha.1](#downloads-for-v1130-alpha1) + - [Client Binaries](#client-binaries-8) + - [Server Binaries](#server-binaries-8) + - [Node Binaries](#node-binaries-8) - [Changelog since v1.12.0](#changelog-since-v1120) - [Action Required](#action-required-1) - - [Other notable changes](#other-notable-changes-3) + - [Other notable changes](#other-notable-changes-7) +# v1.13.1 + +[Documentation](https://docs.k8s.io) + +## 
Downloads for v1.13.1 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes.tar.gz) | `de3858357b2b4444bccc0599c7d0edd3e6ec1a80267ef96883ebcfb06c518ce467dd8720b48084644677a42b8e3ffad9a7d4745b40170ce9dfe5b43310979be1` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-src.tar.gz) | `7f0a8dbd3c7397cc5a5bc0297eb24b8e734c3c7b78e48fc794c525377c3895f4fd84fd0a2fa70c5513cc47ee5a174c22bab54796abc5a8f2b30687642c819a68` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-darwin-386.tar.gz) | `371028dba7a28ec3c8f10b861448cb1574dce25d32d847af254b76b7f158aa4fcda695972e2a08440faa4e16077f8021b07115d0da897bef79c33e702f3be95e` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-darwin-amd64.tar.gz) | `6aa7025308e9fb1eb4415e504e8aa9c7a0a20b09c500cb48df82bbd04443101664b2614fb284875b9670d4bb11e8f1a10190eaf1d54f81f3a9526053958b0802` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-386.tar.gz) | `6453670bb61b4f5f7fe8ae78804864ecd52682b32592f6956faf3d2220884a64fb22ae2e668b63f28ea8fd354c50aa90ce61c60be327fb0b5fcfe2c7835ef559` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-amd64.tar.gz) | `ca00442f50b5d5627357dce97c90c17cb0126d746b887afdab2d4db9e0826532469fd1ee62f40eb6923761618f46752d10993578ca19c8b92c3a2aeb5102a318` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-arm.tar.gz) | `5fa170cbe56b8f5d103f520e2493f911c5eb59b51a6afdbaa9c08196943f1235e533f0384ce7c01c73a020c6889cf8f03cc3642912d0953c74d1098e4b21f3a0` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-arm64.tar.gz) | `710343ad067f0d642c43cd26871828275645b08b4f4c86bd555865318d8fe08b7f0a720174c04d58acffcb26faf563636dc13eef66a2813eac68bb8b994908f4` 
+[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-ppc64le.tar.gz) | `0fa7ab255f0cba3adc754337c6184e6ec464aa5a4d6dd4d38aad8a0e2430a0044f4ed1ffcd7cc7c863190d3cda6b84abd12ca7536139d665ad61fe7704e63d30` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-s390x.tar.gz) | `749a8dce5b81e2edbd315841acac64a0e5d17bb1ead8173560b6a4ccc28604bc8254051297ab51cb5df845495bd75a45137827b3386e3962295fec8601563eaa` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-windows-386.tar.gz) | `cd4732fbe569009c426f963318d05ddcc7c63dc27ec9d2bf9c60d716195e3676aa5b0e6ccbde6298f621450d365d41a910ce3ced89bf2ae6d3e81ee2fed0bb16` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-windows-amd64.tar.gz) | `40f5b5d221b3a611511690d316539dc8fb3f4513e4f9eb141bffa17c9ddeee875a462f5bd45e62ce7c7535310fc3e48e3441614700ee9877584c5948ddbef19f` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-amd64.tar.gz) | `e0e48825c5fe33a3f82b1b74847d9bfb8c5716c4313c5e4e6f46be0580e20a1e396a669b8ca446cfa581e3eb75698813249bbfcfc79c8a90793880eb5c177921` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-arm.tar.gz) | `7ff4856e7959cf14eba0e1ab274c0bf0d3193391e7034a936697f0c4813e81d8dda4a019d3185677bee9d1345a6433db3fd6e55f644a0f73d076e0b2014ed172` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-arm64.tar.gz) | `b8c2356002e675bd3de5ee9c2337a12e2a1bbfa2478f8e3b91065a578dfa8d50f596fd606d9f0232b06b8263867a7ca5cc7c04150718b8e40b49ae7d46001c30` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-ppc64le.tar.gz) | `5d3a15b1241d849d8954894aa7f3fb12606f9966f73fc36aa15152038fc385153b0f0e967cc0bf410a5d5894d0269e54eac581d8e79003904d7bc29b33e98684` 
+[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-s390x.tar.gz) | `78a9cccaf9d737b519db0866c2e80c472c7136bc723910d08649ece1c420ae7f6e56e610d65c436c56ccef8360c4da0f70e75d0cf47c0c8e739f5138cdc7b0d2` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-amd64.tar.gz) | `3a7881a52885bebe5958f02dc54194cc8c330576b7cf5935189df4f0b754b958917b104e1d3358c0bc9277f13a8eef2176284548d664f27a36baa389fbcc7bea` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-arm.tar.gz) | `d0bfcff3ef7c0aa36005e7b111685438ebd0ea61d48dc68a7bd06eea3782b6eb224f9b651d80c955afa162f766c8b682976db43238562c293d6552cdadf9e934` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-arm64.tar.gz) | `2e23bd00661aceb30fa37e24ab71315755bd93dfcc5ff361d78445a8e9ff99e7b3a56641112af3184e8b107545fba6573a6368a82bd0ce475c81cb53fd44da3b` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-ppc64le.tar.gz) | `8d0fdb743c700d662886636fe67b52202cf9e6e57c2d7de5961b8189d8c03c91fda1d68c47033286efcc582e78be40846e2b1f5c589a0b94794fa2ce3c1ebfee` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-s390x.tar.gz) | `70445038b4db62c3fc99540f5ddbb881387018244242f182332b8eaa7159ce1aa8929145010ab2befd4e101d39c24c61e430928235434c7d7eb54f113860a83a` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-windows-amd64.tar.gz) | `a87ad43f5a6b8f66d1bbd64f9c91e8bcbdf4adc8de0ec3cd559adaa8c14a6fe078ffdf090e52627c0522b79209fcc37bf822b323895dd47b18c20026cb25e9f5` + +## Changelog since v1.13.0 + +### Other notable changes + +* Fix overlapping filenames in diff if multiple resources have the same name. 
([#71923](https://github.com/kubernetes/kubernetes/pull/71923), [@apelisse](https://github.com/apelisse)) +* Disable proxy to loopback and linklocal ([#71980](https://github.com/kubernetes/kubernetes/pull/71980), [@micahhausler](https://github.com/micahhausler)) +* kube-scheduler: restores ability to run without authentication configuration lookup permissions ([#71755](https://github.com/kubernetes/kubernetes/pull/71755), [@liggitt](https://github.com/liggitt)) +* client-go: restores behavior of populating the BearerToken field in rest.Config objects constructed from kubeconfig files containing tokenFile config, or from in-cluster configuration. An additional BearerTokenFile field is now populated to enable constructed clients to periodically refresh tokens. ([#71713](https://github.com/kubernetes/kubernetes/pull/71713), [@liggitt](https://github.com/liggitt)) +* apply: fix detection of non-dry-run enabled servers ([#71854](https://github.com/kubernetes/kubernetes/pull/71854), [@apelisse](https://github.com/apelisse)) +* Scheduler only activates unschedulable pods if node's scheduling related properties change. ([#71551](https://github.com/kubernetes/kubernetes/pull/71551), [@mlmhl](https://github.com/mlmhl)) +* Fixes pod deletion when cleaning old cronjobs ([#71802](https://github.com/kubernetes/kubernetes/pull/71802), [@soltysh](https://github.com/soltysh)) +* fix issue: vm sku restriction policy does not work in azure disk attach/detach ([#71941](https://github.com/kubernetes/kubernetes/pull/71941), [@andyzhangx](https://github.com/andyzhangx)) +* Include CRD for BGPConfigurations, needed for calico 2.x to 3.x upgrade. 
([#71868](https://github.com/kubernetes/kubernetes/pull/71868), [@satyasm](https://github.com/satyasm)) +* UDP connections now support graceful termination in IPVS mode ([#71515](https://github.com/kubernetes/kubernetes/pull/71515), [@lbernail](https://github.com/lbernail)) +* kubeadm: use kubeconfig flag instead of kubeconfig-dir on init phase bootstrap-token ([#71803](https://github.com/kubernetes/kubernetes/pull/71803), [@yagonobre](https://github.com/yagonobre)) +* On GCI, NPD starts to monitor kubelet, docker, containerd crashlooping, read-only filesystem and corrupt docker overlay2 issues. ([#71522](https://github.com/kubernetes/kubernetes/pull/71522), [@wangzhen127](https://github.com/wangzhen127)) +* Fixes an issue where Portworx volumes cannot be mounted if 9001 port is already in use on the host and users remap 9001 to another port. ([#70392](https://github.com/kubernetes/kubernetes/pull/70392), [@harsh-px](https://github.com/harsh-px)) +* Only use the first IP address got from instance metadata. This is because Azure CNI would set up a list of IP addresses in instance metadata, while only the first one is the Node's IP. 
([#71736](https://github.com/kubernetes/kubernetes/pull/71736), [@feiskyer](https://github.com/feiskyer)) +* kube-controller-manager: fixed issue display help for the deprecated insecure --port flag ([#71601](https://github.com/kubernetes/kubernetes/pull/71601), [@liggitt](https://github.com/liggitt)) +* Update Cluster Autoscaler version in gce manifests to 1.13.1 (https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.1) ([#71842](https://github.com/kubernetes/kubernetes/pull/71842), [@losipiuk](https://github.com/losipiuk)) +* kubectl: fixes regression in --sort-by behavior ([#71805](https://github.com/kubernetes/kubernetes/pull/71805), [@liggitt](https://github.com/liggitt)) +* Fixes apiserver nil pointer panics when requesting v2beta1 autoscaling object metrics ([#71744](https://github.com/kubernetes/kubernetes/pull/71744), [@yue9944882](https://github.com/yue9944882)) +* Fix scheduling starvation of pods in cluster with large number of unschedulable pods. ([#71488](https://github.com/kubernetes/kubernetes/pull/71488), [@bsalamat](https://github.com/bsalamat)) + + + +# v1.13.0 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.13.0 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes.tar.gz) | `7b6a81c9f1b852b1e889c1b62281569a4b8853c79e5675b0910d941dfa7863c97f244f6d607aae3faf60bccd596dedb9d136b7fffeae199876e780904fd9f31e` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-src.tar.gz) | `844b9fbba21374dd190c8f12dd0e5b3303dd2cd7ad25f241d6f7e46f74adf6987afad021553521d4f479c19d87aa8d4d5be77ac7a6715d31a9187a5bab3b397b` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-darwin-386.tar.gz) | `0c010351acb660a75122feb876c9887d46ec2cb466872dd073b7f5b26fdadd96888a350e01606f2ae43606a5a4ab2d9309441f4357cee924b19688f9b02c55dc` 
+[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-darwin-amd64.tar.gz) | `c2c40bd202900124f4e9458b067a1e1fc040030dc84ce9bcc6a5beb263de05892c16f3bdafb8d854e343e71f086207f390fd0b60f6e32e770c73294b053da6e4` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-386.tar.gz) | `5f5449be103b103d72a4e2b1028ab014cf7f74781166327f2ae284e4f5ecb539f6b60f36b8f7c7be0ae43dfb30661b2672dd93a1fa7e26d6c67498672674bf12` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-amd64.tar.gz) | `61a6cd3b1fb34507e0b762a45da09d88e34921985970a2ba594e0e5af737d94c966434b4e9f8e84fb73a0aeb5fa3e557344cd2eb902bf73c67d4b4bff33c6831` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-arm.tar.gz) | `dd5591e2b88c347759a138c4d2436a0f5252341d0e8c9fbab16b8f151e2744cbdd0c8583555a451425bc471f11b688ce568d9245caf8a278cbac2b343fdead89` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-arm64.tar.gz) | `894ed30261598ebf3485f3575e95f85e3c353f4d834bf9a6ea53b265427704b43fba5403fbc4d522b3f02afb08e6afaae200af1fe57996291a7c74398ec2fe17` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-ppc64le.tar.gz) | `6c26c807fc730ea736fda75dc57ac73395ba78bb828fffeee18b385be550d8f3ba2bbc27a52a8f15bcbbe68218c7945d9fb725e6759c117422bc0a632c110670` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-s390x.tar.gz) | `41e6e972de77c0bde22fdd779ea64e731b60f32e97e78a024f33fc3e33a3b364b7f77ece7d3c64ad85b7f8fe7c8fc6d6892098a3362d1fe01ebf3d551fe2bf37` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-windows-386.tar.gz) | `442229e5030452901b924a94e7a879d4085597a4f201a5b3fc5ac9806cab5830c836cfa7a33e8f1693fe2e8badc4047bf227d7fb00c537fb1fb4cb7639de5455` 
+[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-windows-amd64.tar.gz) | `a11a8e8e732e7292781b9cb1de6e3e41683f95fb3fefc2b1a7b5fb1f064a0d80c0833876d931675135778457d81de9ed2e81caee4b3eb27d9f23c7b722b17442` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-amd64.tar.gz) | `a8e3d457e5bcc1c09eeb66111e8dd049d6ba048c3c0fa90a61814291afdcde93f1c6dbb07beef090d1d8a9958402ff843e9af23ae9f069c17c0a7c6ce4034686` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-arm.tar.gz) | `4e17494767000256775e4dd33c0a9b2d152bd4b5fba9f343b6dfeb5746ff34e400a8e0aaf2153476453225ef57e4bb1ae3635416ab18f9e4dabf4e5cc82f8aaa` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-arm64.tar.gz) | `0ddd0cf0ff56cebfa89efb1972cc2bc6916e824c2af56cfd330ac5638c8918eaf3c60d05714b220dbf4f896160eded123beeba42f5be55fe434a43d04508d86a` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-ppc64le.tar.gz) | `b93828560224e812ed21b57fea5458fa8560745cfec96fc1677b258393c00e208ad9b99467b575e74e01699ffd75f03f5793675032e7306cba7208c1afb53c8d` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-s390x.tar.gz) | `154d565329d5ba52cdb7c3d43d8854b7a9b8e34803c4df6b3e6ae74c1a6e255c78e6559b7546b9158df0e3f7931bbdaf43407d95cd875c79f5cce960bb9882dd` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-amd64.tar.gz) | `9d18ba5f0c3b09edcf29397a496a1e908f4906087be3792989285630d7bcbaf6cd3bdd7b07dace439823885acc808637190f5eaa240b7b4580acf277b67bb553` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-arm.tar.gz) | 
`959b04ff7b8690413e01bffeabaab2119794dedf06b7aae1743e49988f797cb7e6ff12e1a91af2d4c5f664414f3aa4bd9020521c6a21c1196c194d12a6f7fe08` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-arm64.tar.gz) | `b5c18e8c9e28cf276067c871446720d86b6f162e22c3a5e9343cdbc6857baa6961d09a6908b6acd1bbd132c2e2e526377676babf77b8d3bfb36f8711827c105a` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-ppc64le.tar.gz) | `63e3504d3b115fdf3396968afafd1107b98e5a1a15b7c042a87f5a9cffbdc274f7b06b07ce90eb51876cfffd57cf7f20180bad7e9f9762af577e51f4f13d2f7a` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-s390x.tar.gz) | `21c5c2721febf7fddeada9569f3ecbd059267e5d2cc325d98fb74faf1ae9e9e15899750225a1fc7c25feef96e7705b1456cb489f4882b9eb10e78bd0f590d019` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-windows-amd64.tar.gz) | `3e73d3ecff14b4c85a71bb6cf91b1ab7d9c3075c64bd5ce6863562ab17bf808b0cbc33ddd25346d25040649c1ad89745796afd218190886b54f1d8acc17896e4` + +# Kubernetes 1.13 Release Notes + +## Security Content + +- CVE-2018-1002105, a critical security issue in the Kubernetes API Server, is resolved in v1.13.0 (and in [v1.10.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md/#v11011), [v1.11.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md/#v1115), and [v1.12.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md/#v1123)). We recommend all clusters running previous versions update to one of these releases immediately. See issue [#71411](https://github.com/kubernetes/kubernetes/issues/71411) for details. + +## Urgent Upgrade Notes + +### (No, really, you MUST do this before you upgrade) + +Before upgrading to Kubernetes 1.13, you must keep the following in mind: + +- kube-apiserver + - The deprecated `etcd2` storage backend has been removed. 
Before upgrading a kube-apiserver using `--storage-backend=etcd2`, etcd v2 data must be migrated to the v3 storage backend, and kube-apiserver invocations changed to use `--storage-backend=etcd3`. Please consult the installation procedure used to set up etcd for specific migration instructions. Backups prior to upgrade are always a good practice, but since the etcd2 to etcd3 migration is not reversible, an etcd backup prior to migration is essential. + - The deprecated `--etcd-quorum-read` flag has been removed. Quorum reads are now always enabled when fetching data from etcd. Remove the `--etcd-quorum-read` flag from kube-apiserver invocations before upgrading. +- kube-controller-manager + - The deprecated `--insecure-experimental-approve-all-kubelet-csrs-for-group` flag has been removed. +- kubelet + - The deprecated `--google-json-key` flag has been removed. Remove the `--google-json-key` flag from kubelet invocations before upgrading. ([#69354](https://github.com/kubernetes/kubernetes/pull/69354), [@yujuhong](https://github.com/yujuhong)) + - DaemonSet pods now make use of scheduling features that require kubelets to be at 1.11 or above. Ensure all kubelets in the cluster are at 1.11 or above before upgrading kube-controller-manager to 1.13. + - The schema for the alpha `CSINodeInfo` CRD has been split into `spec` and `status` fields, and new fields `status.available` and `status.volumePluginMechanism` added. Clusters using the previous alpha schema must delete and recreate the CRD using the new schema. ([#70515](https://github.com/kubernetes/kubernetes/pull/70515), [@davidz627](https://github.com/davidz627)) +- kube-scheduler dropped support for configuration files with apiVersion `componentconfig/v1alpha1`. Ensure kube-scheduler is configured using command-line flags or a configuration file with apiVersion `kubescheduler.config.k8s.io/v1alpha1` before upgrading to 1.13. +- kubectl + - The deprecated command `run-container` has been removed. 
Invocations should use `kubectl run` instead ([#70728](https://github.com/kubernetes/kubernetes/pull/70728), [@Pingan2017](https://github.com/Pingan2017)) +- client-go releases will no longer have bootstrap (k8s.io/client-go/tools/bootstrap) related code. Any reference to it will break. Please redirect all references to k8s.io/bootstrap instead. ([#67356](https://github.com/kubernetes/kubernetes/pull/67356), [@yliaog](https://github.com/yliaog)) +- Kubernetes cannot distinguish between GCE Zonal PDs and Regional PDs with the same name. To workaround this issue, precreate PDs with unique names. PDs that are dynamically provisioned do not encounter this issue. ([#70716](https://github.com/kubernetes/kubernetes/pull/70716), [@msau42](https://github.com/msau42)) + +## Known Issues + +- If kubelet plugin registration for a driver fails, kubelet will not retry. The driver must delete and recreate the driver registration socket in order to force kubelet to attempt registration again. Restarting only the driver container may not be sufficient to trigger recreation of the socket, instead a pod restart may be required. ([#71487](https://github.com/kubernetes/kubernetes/issues/71487)) +- In some cases, a Flex volume resize may leave a PVC with erroneous Resizing condition even after volume has been successfully expanded. Users may choose to delete the condition, but it is not required. ([#71470](https://github.com/kubernetes/kubernetes/issues/71470)) +- The CSI driver-registrar external sidecar container v1.0.0-rc2 is known to take up to 1 minute to start in some cases. We expect this issue to be resolved in a future release of the sidecar container. For verification, please see the release notes of future releases of the external sidecar container. ([#76](https://github.com/kubernetes-csi/driver-registrar/issues/76)) +- When using IPV6-only, be sure to use `proxy-mode=iptables` as `proxy-mode=ipvs` is known to not work. 
([#68437](https://github.com/kubernetes/kubernetes/issues/68437)) + +## Deprecations + +- kube-apiserver + - The `--service-account-api-audiences` flag is deprecated in favor of `--api-audiences`. The old flag is accepted with a warning but will be removed in a future release. ([#70105](https://github.com/kubernetes/kubernetes/pull/70105), [@mikedanese](https://github.com/mikedanese)) + - The `--experimental-encryption-provider-config` flag is deprecated in favor of `--encryption-provider-config`. The old flag is accepted with a warning but will be removed in 1.14. ([#71206](https://github.com/kubernetes/kubernetes/pull/71206), [@stlaz](https://github.com/stlaz)) + - As part of graduating the etcd encryption feature to beta, the configuration file referenced by `--encryption-provider-config` now uses `kind: EncryptionConfiguration` and `apiVersion: apiserver.config.k8s.io/v1`. Support for `kind: EncryptionConfig` and `apiVersion: v1` is deprecated and will be removed in a future release. ([#67383](https://github.com/kubernetes/kubernetes/pull/67383), [@stlaz](https://github.com/stlaz)) + - The `--deserialization-cache-size` flag is deprecated, and will be removed in a future release. The flag is inactive since the etcd2 storage backend was removed. ([#69842](https://github.com/kubernetes/kubernetes/pull/69842), [@liggitt](https://github.com/liggitt)) + - The `Node` authorization mode no longer allows kubelets to delete their Node API objects (prior to 1.11, in rare circumstances related to cloudprovider node ID changes, kubelets would attempt to delete/recreate their Node object at startup) ([#71021](https://github.com/kubernetes/kubernetes/pull/71021), [@liggitt](https://github.com/liggitt)) + - The built-in `system:csi-external-provisioner` and `system:csi-external-attacher` cluster roles are deprecated and will not be auto-created in a future release. CSI deployments should provide their own RBAC role definitions with required permissions. 
([#69868](https://github.com/kubernetes/kubernetes/pull/69868), [@pohly](https://github.com/pohly)) + - The built-in `system:aws-cloud-provider` cluster role is deprecated and will not be auto-created in a future release. Deployments using the AWS cloud provider should grant required permissions to the `aws-cloud-provider` service account in the `kube-system` namespace as part of deployment. ([#66635](https://github.com/kubernetes/kubernetes/pull/66635), [@wgliang](https://github.com/wgliang)) +- kubelet + - Use of the beta plugin registration directory `{kubelet_root_dir}/plugins/` for registration of external drivers via the kubelet plugin registration protocol is deprecated in favor of `{kubelet_root_dir}/plugins_registry/`. Support for the old directory is planned to be removed in v1.15. Device plugin and CSI storage drivers should switch to the new directory prior to v1.15. Only CSI storage drivers that support 0.x versions of the CSI API are allowed in the old directory. ([#70494](https://github.com/kubernetes/kubernetes/pull/70494) by [@RenaudWasTaken](https://github.com/RenaudWasTaken) and [#71314](https://github.com/kubernetes/kubernetes/pull/71314) by [@saad-ali](https://github.com/saad-ali)) + - With the release of the CSI 1.0 API, support for CSI drivers using 0.3 and older releases of the CSI API is deprecated, and is planned to be removed in Kubernetes v1.15. CSI drivers should be updated to support the CSI 1.0 API, and deployed in the new kubelet plugin registration directory (`{kubelet_root_dir}/plugins_registry/`) once all nodes in the cluster are at 1.13 or higher ([#71020](https://github.com/kubernetes/kubernetes/pull/71020) and [#71314](https://github.com/kubernetes/kubernetes/pull/71314), both by [@saad-ali](https://github.com/saad-ali)) + - Use of the `--node-labels` flag to set labels under the `kubernetes.io/` and `k8s.io/` prefix will be subject to restriction by the `NodeRestriction` admission plugin in future releases. 
[See admission plugin documentation](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction) for allowed labels. ([#68267](https://github.com/kubernetes/kubernetes/pull/68267), [@liggitt](https://github.com/liggitt)) +- kube-scheduler + - The alpha critical pod annotation (`scheduler.alpha.kubernetes.io/critical-pod`) is deprecated. Pod priority should be used instead to mark pods as critical. ([#70298](https://github.com/kubernetes/kubernetes/pull/70298), [@bsalamat](https://github.com/bsalamat)) +- The following features are now GA, and the associated feature gates are deprecated and will be removed in a future release: + - CSIPersistentVolume + - GCERegionalPersistentDisk + - KubeletPluginsWatcher + - VolumeScheduling +- kubeadm + - The DynamicKubeletConfig feature gate is deprecated. The functionality is still accessible by using the kubeadm alpha kubelet enable-dynamic command. + - The command `kubeadm config print-defaults` is deprecated in favor of `kubeadm config print init-defaults` and `kubeadm config print join-defaults` ([#69617](https://github.com/kubernetes/kubernetes/pull/69617), [@rosti](https://github.com/rosti)) + - support for the `v1alpha3` configuration file format is deprecated and will be removed in 1.14. Use `kubeadm config migrate` to migrate `v1alpha3` configuration files to `v1beta1`, which provides improvements in image repository management, addons configuration, and other areas. 
The documentation for `v1beta1` can be found here: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 +- The `node.status.volumes.attached.devicePath` field is deprecated for CSI volumes and will not be set in future releases ([#71095](https://github.com/kubernetes/kubernetes/pull/71095), [@msau42](https://github.com/msau42)) +- kubectl + - The `kubectl convert` command is deprecated and will be removed in a future release ([#70820](https://github.com/kubernetes/kubernetes/pull/70820), [@seans3](https://github.com/seans3)) +- Support for passing unknown provider names to the E2E test binaries is deprecated and will be removed in a future release. Use `--provider=skeleton` (no ssh access) or `--provider=local` (local cluster with ssh) instead. ([#70141](https://github.com/kubernetes/kubernetes/pull/70141), [@pohly](https://github.com/pohly)) + +## Major Themes + +### SIG API Machinery + +For the 1.13 release, SIG API Machinery is happy to announce that the [dry-run functionality](https://kubernetes.io/docs/reference/using-api/api-concepts/#dry-run) is now beta. + +### SIG Auth + +With this release we've made several important enhancements to core SIG Auth areas. In the authorization category, we've further reduced Kubelet privileges by [restricting node self-updates of labels to a whitelisted selection and by disallowing kubelets from deleting their Node API object](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction). In authentication, we added alpha-level support for automounting improved service account tokens through projected volumes. We also enabled [audience validation in TokenReview](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#tokenreview-v1-authentication-k8s-io) for the new tokens for improved scoping. 
Under audit logging, the new alpha-level "dynamic audit configuration" adds support for [dynamically registering webhooks to receive a stream of audit events](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#dynamic-backend). Finally, we've enhanced secrets protection by graduating [etcd encryption](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) out of experimental. + +### SIG AWS + +In v1.13 we worked on tighter integrations of Kubernetes API objects with AWS services. These include three out-of-tree alpha feature releases: + +1) Alpha for AWS ALB (Application Load Balancer) integration to Kubernetes Ingress resources. +2) Alpha for CSI specification 0.3 integration to AWS EBS (Elastic Block Store) +3) Alpha for the cloudprovider-aws cloud controller manager binary. Additionally we added [aws-k8s-tester](https://github.com/kubernetes/test-infra/issues/9814), deployer interface for kubetest, to the test-infra repository. This plugin allowed us to integrate Prow to the 3 subprojects defined above in order to provide CI signal for all 3 features. The CI signal is visible [here](https://testgrid.k8s.io/) under SIG-AWS. + +For detailed release notes on the three alpha features from SIG AWS, please refer to the following Changelogs: + +- [aws-alb-ingress-controller v1.0.0](https://github.com/kubernetes-sigs/aws-alb-ingress-controller/releases/tag/v1.0.0) +- [aws-ebs-csi-driver v0.1](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/CHANGELOG-0.1.md) +- [cloudprovider-aws external v0.1.0](https://github.com/kubernetes/cloud-provider-aws/blob/master/changelogs/CHANGELOG-0.1.md) + +### SIG Azure + +For 1.13 SIG Azure was focused on adding additional Azure Disk support for Ultra SSD, Standard SSD, and Premium Azure Files. Azure Availability Zones and cross resource group nodes were also moved from Alpha to Beta in 1.13. 
+ +### SIG Big Data + +During the 1.13 release cycle, SIG Big Data has been focused on community engagements relating to 3rd-party project integrations with Kubernetes. There have been no impacts on the 1.13 release. + +### SIG CLI + +Over the course of 1.13 release SIG CLI mostly focused on stabilizing the items we’ve been working on over the past releases such as server-side printing and its support in kubectl, as well as finishing [kubectl diff which is based on server-side dry-run feature](https://kubernetes.io/docs/concepts/overview/object-management-kubectl/#how-to-create-objects). We’ve continued separating kubectl code to prepare for extraction out of main repository. Finally, thanks to the awesome support and feedback from community we’ve managed to promote the new [plugin mechanism to Beta](https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/). + +### SIG Cloud Provider + +For v1.13, SIG Cloud Provider has been focused on stabilizing the common APIs and interfaces consumed by cloud providers today. This involved auditing the cloud provider APIs for anything that should be deprecated as well as adding changes where necessary. In addition, SIG Cloud Provider has begun exploratory work around having a “cloud provider” e2e test suite which can be used to test common cloud provider functionalities with resources such as nodes and load balancers. + +We are also continuing our long running effort to extract all the existing cloud providers that live in k8s.io/kubernetes into their own respective repos. Along with this migration, we are slowly transitioning users to use the cloud-controller-manager for any cloud provider features instead of the kube-controller-manager. + +### SIG Cluster Lifecycle + +For 1.13 SIG Cluster Lifecycle is pleased to announce the long awaited promotion of kubeadm to stable GA, and the promotion of kubeadm’s configuration API to `v1beta1`. 
+In this release the SIG again focused on further improving the user experience on cluster creation and also fixing a number of bugs and other assorted improvements. + +Some notable changes in kubeadm since Kubernetes 1.12: + +- kubeadm’s configuration API is now `v1beta1`. The new configuration format provides improvements in image repository management, addons configuration, and other areas. We encourage `v1alpha3` users to migrate to this configuration API using `kubeadm config migrate`, as `v1alpha3` will be removed in 1.14. The documentation for `v1beta1` can be found here: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 +- kubeadm has graduated `kubeadm alpha phase` commands to `kubeadm init phase`. This means that the phases of creating a control-plane node are now tightly integrated as part of the `init` command. Alpha features, not yet ready for GA are still kept under `kubeadm alpha` and we appreciate feedback on them. +- `kubeadm init` and `kubeadm init phase` now have a `--image-repository` flag, improving support for environments with limited access to the official Kubernetes repository. +- The DynamicKubeletConfig and SelfHosting functionality was moved outside of `kubeadm init` and feature gates and is now exposed under `kubeadm alpha`. +- Kubeadm init phase certs now support the `--csr-only` option, simplifying custom CA creation. +- `kubeadm join --experimental-control-plane` now automatically adds a new etcd member for `local etcd` mode, further simplifying required tasks for HA clusters setup. +- Improvements were made to `kubeadm reset` related to cleaning etcd and notifying the user about the state of iptables. +- kubeadm commands now print warnings if input YAML documents contain unknown or duplicate fields. +- kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. 
+- kubeadm now automatically sets the `--pod-infra-container-image` flag when starting the kubelet. + +### SIG IBM Cloud + +The IBM Cloud SIG was focused on defining its charter and working towards moving its cloud provider code to an external repository with a goal to have this work done by the end of Kubernetes 1.14 release cycle. In the SIG meetings, we also made sure to share updates on the latest Kubernetes developments in the IBM Cloud like the availability of Kubernetes v1.12.2 in the IBM Cloud Kubernetes Service (IKS). The SIG updates were provided in the Kubernetes community weekly call and at the KubeCon China 2018. + +### SIG Multicluster + +Moving Federation v2 from Alpha towards Beta has been the focus of our effort over the past quarter. To this end we engaged with end users, and successfully enlisted additional contributors from companies including IBM, Amadeus, Cisco and others. Federation v2 provides a suite of decoupled API’s and re-usable components for building multi-cluster control planes. We plan to start releasing Beta components in late 2018. In addition, more minor updates were made to our cluster-registry and multi-cluster ingress sub-projects. + +### SIG Network + +For 1.13, the areas of focus were in IPv6, DNS improvements and some smaller items: +CoreDNS is now the default cluster DNS passing all of the scale/resource usage tests +Node-local DNS cache feature is available in Alpha. This feature deploys a lightweight DNS caching Daemonset that avoids the conntrack and converts queries from UDP to more reliable TCP. +PodReady++ feature now has `kubectl` CLI support. + +Progress was made towards finalizing the IPv6 dual stack support KEP and support for topological routing of services. + +### SIG Node + +SIG Node focused on stability and performance improvements in the 1.13 release. A new alpha feature is introduced to improve the mechanism that nodes heartbeat back to the control plane. 
The `NodeLease` feature results in the node using a `Lease` resource in the `kube-node-lease` namespace that is renewed periodically. The `NodeStatus` that was used previously to heartbeat back to the control plane is only updated when it changes. This reduces load on the control plane for large clusters. The Kubelet plugin registration mechanism, which enables automatic discovery of external plugins (including CSI and device plugins) has been promoted to stable in this release (introduced as alpha in 1.11 and promoted to beta in 1.12). + +### SIG Openstack + +The major theme for the SIG OpenStack release is the work-in-progress for removing the in-tree provider. This work, being done in conjunction with SIG Cloud Provider, is focusing on moving internal APIs that the OpenStack (and other providers) depends upon to staging to guarantee API stability. This work also included abstracting the in-tree Cinder API and refactoring code to the external Cinder provider to remove additional Cinder volume provider code. + +Additional work was also done to implement an OpenStack driver for the Cluster API effort lead by SIG Cluster Lifecycle. For the external Cloud-Provider-OpenStack code, the SIG largely focused on bug fixes and updates to match K8s 1.13 development. + +### SIG Scalability + +SIG Scalability has mostly focused on stability and deflaking our tests, investing into framework for writing scalability tests (ClusterLoader v2) with a goal to migrate all tests to it by the end of 2018 and on the work towards extending definition of Kubernetes scalability by providing more/better user-friendly SLIs/SLOs. + +### SIG Scheduling + +SIG Scheduling has mostly focused on stability in 1.13 and has postponed some of the major features to the next versions. There are still two notable changes: 1. TaintBasedEviction is moved to Beta and will be enabled by default. 
With this feature enabled, condition taints are automatically added to the nodes and pods can add tolerations for them if needed. 2. Pod critical annotation is deprecated. Pods should use pod priority instead of the annotation. + +It is worth noting again that kube-scheduler will use apiVersion `kubescheduler.config.k8s.io/v1alpha1` instead of `componentconfig/v1alpha1` in its configuration files in 1.13. + +### SIG Service Catalog + +The Service Plan Defaults feature is still under active development. +We continue to improve the UX for the svcat CLI, specifically filling in gaps for the new Namespaced Service Broker feature. + +### SIG Storage + +Over the last year, SIG Storage has been focused on adding support for the Container Storage Interface (CSI) to Kubernetes. The specification recently moved to 1.0, and on the heels of this achievement, Kubernetes v1.13 moves CSI support for PersistentVolumes to GA. + +With CSI the Kubernetes volume layer becomes truly extensible, allowing third party storage developers to write drivers making their storage systems available in Kubernetes without having to touch the core code. + +CSI was first introduced as alpha in Kubernetes v1.9 and moved to beta in Kubernetes v1.10. + +You can find a list of sample and production drivers in the [CSI Documentation](https://kubernetes.io/docs/concepts/storage/volumes/#csi). + +SIG Storage also moves support for Block Volumes to beta (introduced as alpha in v1.9) and support for Topology Aware Volume Scheduling to stable (introduced as alpha in v1.9 and promoted to beta in 1.10). + +### SIG UI + +The migration to the newest version of Angular is still under active development as it is the most important thing on the roadmap at the moment. We are getting closer to the new release. We continue fixing bugs and adding other improvements. 
+ +### SIG VMWare + +Major focus for SIG VMware for this release is the work on moving internal APIs that the vSphere provider depends upon to staging to guarantee API stability. This work is being done in conjunction with SIG Cloud Provider and includes the creation of a brand new vsphere-csi plugin to replace the current volume functionalities in-tree. + +Additional work was also done to implement a vSphere provider for the Cluster API effort lead by SIG Cluster Lifecycle. For the out-of-tree vSphere cloud provider, the SIG largely focused on bug fixes and updates to match K8s 1.13 development. + +### SIG Windows + +SIG Windows focused on improving reliability for Windows and Kubernetes support + +## New Features + +- kubelet: When node lease feature is enabled, kubelet reports node status to api server only if there is some change or it didn't report over last report interval. ([#69753](https://github.com/kubernetes/kubernetes/pull/69753), [@wangzhen127](https://github.com/wangzhen127)) +- vSphereVolume implements Raw Block Volume Support ([#68761](https://github.com/kubernetes/kubernetes/pull/68761), [@fanzhangio](https://github.com/fanzhangio)) +- CRD supports multi-version Schema, Subresources and AdditionalPrintColumns (NOTE that CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an updated that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must be explicitly set to null). ([#70211](https://github.com/kubernetes/kubernetes/pull/70211), [@roycaihw](https://github.com/roycaihw)) +- New addon in addon manager that automatically installs CSI CRDs if CSIDriverRegistry or CSINodeInfo feature gates are true. 
([#70193](https://github.com/kubernetes/kubernetes/pull/70193), [@saad-ali](https://github.com/saad-ali)) +- Delegated authorization can now allow unrestricted access for `system:masters` like the main kube-apiserver ([#70671](https://github.com/kubernetes/kubernetes/pull/70671), [@deads2k](https://github.com/deads2k)) +- Added dns capabilities for Windows CNI plugins: ([#67435](https://github.com/kubernetes/kubernetes/pull/67435), [@feiskyer](https://github.com/feiskyer)) +- kube-apiserver: `--audit-webhook-version` and `--audit-log-version` now default to `audit.k8s.io/v1` if unspecified ([#70476](https://github.com/kubernetes/kubernetes/pull/70476), [@charrywanganthony](https://github.com/charrywanganthony)) +- kubeadm: timeoutForControlPlane is introduced as part of the API Server config, that controls the timeout for the wait for control plane to be up. Default value is 4 minutes. ([#70480](https://github.com/kubernetes/kubernetes/pull/70480), [@rosti](https://github.com/rosti)) +- `--api-audiences` now defaults to the `--service-account-issuer` if the issuer is provided but the API audience is not. ([#70308](https://github.com/kubernetes/kubernetes/pull/70308), [@mikedanese](https://github.com/mikedanese)) +- Added support for projected volume in describe function ([#70158](https://github.com/kubernetes/kubernetes/pull/70158), [@WanLinghao](https://github.com/WanLinghao)) +- kubeadm now automatically creates a new stacked etcd member when joining a new control plane node (does not apply to external etcd) ([#69486](https://github.com/kubernetes/kubernetes/pull/69486), [@fabriziopandini](https://github.com/fabriziopandini)) +- Display the usage of ephemeral-storage when using `kubectl describe node` ([#70268](https://github.com/kubernetes/kubernetes/pull/70268), [@Pingan2017](https://github.com/Pingan2017)) +- Added functionality to enable br_netfilter and ip_forward for debian packages to improve kubeadm support for CRI runtime besides Docker. 
([#70152](https://github.com/kubernetes/kubernetes/pull/70152), [@ashwanikhemani](https://github.com/ashwanikhemani)) +- Added regions ap-northeast-3 and eu-west-3 to the list of well known AWS regions. ([#70252](https://github.com/kubernetes/kubernetes/pull/70252), [@nckturner](https://github.com/nckturner)) +- kubeadm: Implemented preflight check to ensure that number of CPUs ([#70048](https://github.com/kubernetes/kubernetes/pull/70048), [@bart0sh](https://github.com/bart0sh)) +- CoreDNS is now the default DNS server in kube-up deployments. ([#69883](https://github.com/kubernetes/kubernetes/pull/69883), [@chrisohaver](https://github.com/chrisohaver)) +- Opt out of chowning and chmoding from kubectl cp. ([#69573](https://github.com/kubernetes/kubernetes/pull/69573), [@bjhaid](https://github.com/bjhaid)) +- Failed to provision volume with StorageClass "azurefile-premium": failed to create share andy-mg1121-dynamic-pvc-1a7b2813-d1b7-11e8-9e96-000d3a03e16b in account f7228f99bcde411e8ba4900: failed to create file share, err: storage: service returned error: StatusCode=400, ErrorCode=InvalidHeaderValue, ErrorMessage=The value for one of the HTTP headers is not in the correct format. ([#69718](https://github.com/kubernetes/kubernetes/pull/69718), [@andyzhangx](https://github.com/andyzhangx)) +- `TaintBasedEvictions` feature is promoted to beta. ([#69824](https://github.com/kubernetes/kubernetes/pull/69824), [@Huang-Wei](https://github.com/Huang-Wei)) +- Fixed https://github.com/kubernetes/client-go/issues/478 by adding support for JSON Patch in client-go/dynamic/fake ([#69330](https://github.com/kubernetes/kubernetes/pull/69330), [@vaikas-google](https://github.com/vaikas-google)) +- Dry-run is promoted to Beta and will be enabled by default. ([#69644](https://github.com/kubernetes/kubernetes/pull/69644), [@apelisse](https://github.com/apelisse)) +- `kubectl get priorityclass` now prints value column by default. 
([#69431](https://github.com/kubernetes/kubernetes/pull/69431), [@Huang-Wei](https://github.com/Huang-Wei)) +- Added a new container based image for running e2e tests ([#69368](https://github.com/kubernetes/kubernetes/pull/69368), [@dims](https://github.com/dims)) +- The `LC_ALL` and `LC_MESSAGES` env vars can now be used to set desired locale for `kubectl` while keeping `LANG` unchanged. ([#69500](https://github.com/kubernetes/kubernetes/pull/69500), [@m1kola](https://github.com/m1kola)) +- NodeLifecycleController: Now node lease renewal is treated as the heartbeat signal from the node, in addition to NodeStatus Update. ([#69241](https://github.com/kubernetes/kubernetes/pull/69241), [@wangzhen127](https://github.com/wangzhen127)) +- Added dynamic shared informers to write generic, non-generated controllers ([#69308](https://github.com/kubernetes/kubernetes/pull/69308), [@p0lyn0mial](https://github.com/p0lyn0mial)) +- Upgraded to etcd 3.3 client ([#69322](https://github.com/kubernetes/kubernetes/pull/69322), [@jpbetz](https://github.com/jpbetz)) +- It is now possible to use named ports in the `kubectl port-forward` command ([#69477](https://github.com/kubernetes/kubernetes/pull/69477), [@m1kola](https://github.com/m1kola)) +- `kubectl wait` now supports condition value checks other than true using `--for condition=available=false` ([#69295](https://github.com/kubernetes/kubernetes/pull/69295), [@deads2k](https://github.com/deads2k)) +- Updated defaultbackend image to 1.5. Users should concentrate on updating scripts to the new version. ([#69120](https://github.com/kubernetes/kubernetes/pull/69120), [@aledbf](https://github.com/aledbf)) +- Bumped Dashboard version to v1.10.0 ([#68450](https://github.com/kubernetes/kubernetes/pull/68450), [@jeefy](https://github.com/jeefy)) +- Added env variables to control CPU requests of kube-controller-manager and kube-scheduler. 
([#68823](https://github.com/kubernetes/kubernetes/pull/68823), [@loburm](https://github.com/loburm)) +- PodSecurityPolicy objects now support a `MayRunAs` rule for `fsGroup` and `supplementalGroups` options. This allows specifying ranges of allowed GIDs for pods/containers without forcing a default GID the way `MustRunAs` does. This means that a container to which such a policy applies to won't use any fsGroup/supplementalGroup GID if not explicitly specified, yet a specified GID must still fall in the GID range according to the policy. ([#65135](https://github.com/kubernetes/kubernetes/pull/65135), [@stlaz](https://github.com/stlaz)) +- Upgrade Stackdriver Logging Agent addon image to 0.6-1.6.0-1 to use Fluentd v1.2. This provides nanoseconds timestamp granularity for logs. ([#70954](https://github.com/kubernetes/kubernetes/pull/70954), [@qingling128](https://github.com/qingling128)) +- When the BoundServiceAccountTokenVolumes Alpha feature is enabled, ServiceAccount volumes now use a projected volume source and their names have the prefix "kube-api-access". ([#69848](https://github.com/kubernetes/kubernetes/pull/69848), [@mikedanese](https://github.com/mikedanese)) +- Raw block volume support is promoted to beta, and enabled by default. This is accessible via the `volumeDevices` container field in pod specs, and the `volumeMode` field in persistent volume and persistent volume claims definitions. ([#71167](https://github.com/kubernetes/kubernetes/pull/71167), [@msau42](https://github.com/msau42)) +- TokenReview now supports audience validation of tokens with audiences other than the kube-apiserver. ([#62692](https://github.com/kubernetes/kubernetes/pull/62692), [@mikedanese](https://github.com/mikedanese)) +- StatefulSet is supported in `kubectl autoscale` command ([#71103](https://github.com/kubernetes/kubernetes/pull/71103), [@Pingan2017](https://github.com/Pingan2017)) +- Kubernetes v1.13 moves support for Container Storage Interface to GA. 
As part of this move Kubernetes now supports CSI v1.0.0 and deprecates support for CSI 0.3 and older releases. Older CSI drivers must be updated to CSI 1.0 and moved to the new kubelet plugin registration directory in order to work with Kubernetes 1.15+. ([#71020](https://github.com/kubernetes/kubernetes/pull/71020), [@saad-ali](https://github.com/saad-ali)) +- Added option to create CSRs instead of certificates for kubeadm init phase certs and kubeadm alpha certs renew ([#70809](https://github.com/kubernetes/kubernetes/pull/70809), [@liztio](https://github.com/liztio)) +- Added a kubelet socket which serves an grpc service containing the devices used by containers on the node. ([#70508](https://github.com/kubernetes/kubernetes/pull/70508), [@dashpole](https://github.com/dashpole)) +- Added DynamicAuditing feature which allows for the configuration of audit webhooks through the use of an AuditSink API object. ([#67257](https://github.com/kubernetes/kubernetes/pull/67257), [@pbarker](https://github.com/pbarker)) +- The kube-apiserver's healthz now takes in an optional query parameter which allows you to disable health checks from causing healthz failures. ([#70676](https://github.com/kubernetes/kubernetes/pull/70676), [@logicalhan](https://github.com/logicalhan)) +- Introduced support for running a nodelocal dns cache. It is disabled by default, can be enabled by setting KUBE_ENABLE_NODELOCAL_DNS=true ([#70555](https://github.com/kubernetes/kubernetes/pull/70555), [@prameshj](https://github.com/prameshj)) +- Added readiness gates in extended output for pods ([#70775](https://github.com/kubernetes/kubernetes/pull/70775), [@freehan](https://github.com/freehan)) +- Added `Ready` column and improve human-readable output of Deployments and StatefulSets ([#70466](https://github.com/kubernetes/kubernetes/pull/70466), [@Pingan2017](https://github.com/Pingan2017)) +- Added `kubelet_container_log_size_bytes` metric representing the log file size of a container. 
([#70749](https://github.com/kubernetes/kubernetes/pull/70749), [@brancz](https://github.com/brancz)) +- NodeLifecycleController: When node lease feature is enabled, node lease will be deleted when the corresponding node is deleted. ([#70034](https://github.com/kubernetes/kubernetes/pull/70034), [@wangzhen127](https://github.com/wangzhen127)) +- GCERegionalPersistentDisk feature is GA now! ([#70716](https://github.com/kubernetes/kubernetes/pull/70716), [@jingxu97](https://github.com/jingxu97)) +- Added secure port 10259 to the kube-scheduler (enabled by default) and deprecate old insecure port 10251. Without further flags self-signed certs are created on startup in memory. ([#69663](https://github.com/kubernetes/kubernetes/pull/69663), [@sttts](https://github.com/sttts)) + +## Release Notes From SIGs + +### SIG API Machinery + +- The OwnerReferencesPermissionEnforcement admission plugin now checks authorization for the correct scope (namespaced or cluster-scoped) of the owner resource type. Previously, it always checked permissions at the same scope as the child resource. ([#70389](https://github.com/kubernetes/kubernetes/pull/70389), [@caesarxuchao](https://github.com/caesarxuchao)) +- OpenAPI spec now correctly marks delete request's body parameter as optional ([#70032](https://github.com/kubernetes/kubernetes/pull/70032), [@iamneha](https://github.com/iamneha)) +- The rules for incrementing `metadata.generation` of custom resources changed: ([#69059](https://github.com/kubernetes/kubernetes/pull/69059), [@caesarxuchao](https://github.com/caesarxuchao)) + - If the custom resource participates the spec/status convention, the metadata.generation of the CR increments when there is any change, except for the changes to the metadata or the changes to the status. + - If the custom resource does not participate the spec/status convention, the metadata.generation of the CR increments when there is any change to the CR, except for changes to the metadata. 
+ - A custom resource is considered to participate the spec/status convention if and only if the "CustomResourceSubresources" feature gate is turned on and the CRD has `.spec.subresources.status={}`. +- Fixed patch/update operations on multi-version custom resources ([#70087](https://github.com/kubernetes/kubernetes/pull/70087), [@liggitt](https://github.com/liggitt)) +- Reduced memory utilization of admission webhook metrics by removing resource related labels. ([#69895](https://github.com/kubernetes/kubernetes/pull/69895), [@jpbetz](https://github.com/jpbetz)) +- Kubelet can now parse PEM file containing both TLS certificate and key in arbitrary order. Previously key was always required to be first. ([#69536](https://github.com/kubernetes/kubernetes/pull/69536), [@awly](https://github.com/awly)) +- Code-gen: Removed lowercasing for project imports ([#68484](https://github.com/kubernetes/kubernetes/pull/68484), [@jsturtevant](https://github.com/jsturtevant)) +- Fixed client cert setup in delegating authentication logic ([#69430](https://github.com/kubernetes/kubernetes/pull/69430), [@DirectXMan12](https://github.com/DirectXMan12)) +- OpenAPI spec and API reference now reflect dryRun query parameter for POST/PUT/PATCH operations ([#69359](https://github.com/kubernetes/kubernetes/pull/69359), [@roycaihw](https://github.com/roycaihw)) +- Fixed the sample-apiserver so that its BanFlunder admission plugin can be used. 
([#68417](https://github.com/kubernetes/kubernetes/pull/68417), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) +- APIService availability related to networking glitches are corrected faster ([#68678](https://github.com/kubernetes/kubernetes/pull/68678), [@deads2k](https://github.com/deads2k)) +- Fixed an issue with stuck connections handling error responses ([#71412](https://github.com/kubernetes/kubernetes/pull/71412), [@liggitt](https://github.com/liggitt)) +- apiserver: fixed handling and logging of panics in REST handlers ([#71076](https://github.com/kubernetes/kubernetes/pull/71076), [@liggitt](https://github.com/liggitt)) +- kube-controller-manager no longer removes ownerReferences from ResourceQuota objects ([#70035](https://github.com/kubernetes/kubernetes/pull/70035), [@liggitt](https://github.com/liggitt)) +- "unfinished_work_microseconds" is added to the workqueue metrics; it can be used to detect stuck worker threads. (kube-controller-manager runs many workqueues.) ([#70884](https://github.com/kubernetes/kubernetes/pull/70884), [@lavalamp](https://github.com/lavalamp)) +- Timeouts set in ListOptions for clients will also be respected locally ([#70998](https://github.com/kubernetes/kubernetes/pull/70998), [@deads2k](https://github.com/deads2k)) +- Added support for CRD conversion webhook ([#67006](https://github.com/kubernetes/kubernetes/pull/67006), [@mbohlool](https://github.com/mbohlool)) +- client-go: fixed sending oversized data frames to spdystreams in remotecommand.NewSPDYExecutor ([#70999](https://github.com/kubernetes/kubernetes/pull/70999), [@liggitt](https://github.com/liggitt)) +- Fixed missing flags in `kube-controller-manager --help`. ([#71298](https://github.com/kubernetes/kubernetes/pull/71298), [@stewart-yu](https://github.com/stewart-yu)) +- Fixed missing flags in `kube-apiserver --help`. 
([#70204](https://github.com/kubernetes/kubernetes/pull/70204), [@imjching](https://github.com/imjching)) +- The caBundle and service fields in admission webhook API objects now correctly indicate they are optional ([#70138](https://github.com/kubernetes/kubernetes/pull/70138), [@liggitt](https://github.com/liggitt)) +- Fixed an issue with stuck connections handling error responses ([#71419](https://github.com/kubernetes/kubernetes/pull/71419), [@liggitt](https://github.com/liggitt)) +- kube-controller-manager and cloud-controller-manager now hold generated serving certificates in-memory unless a writeable location is specified with --cert-dir ([#69884](https://github.com/kubernetes/kubernetes/pull/69884), [@liggitt](https://github.com/liggitt)) +- CCM server will not listen insecurely if secure port is specified ([#68982](https://github.com/kubernetes/kubernetes/pull/68982), [@aruneli](https://github.com/aruneli)) +- List operations against the API now return internal server errors instead of partially complete lists when a value cannot be transformed from storage. The updated behavior is consistent with all other operations that require transforming data from storage such as watch and get. ([#69399](https://github.com/kubernetes/kubernetes/pull/69399), [@mikedanese](https://github.com/mikedanese)) + +### SIG Auth + +- API Server can be configured to reject requests that cannot be audit-logged. ([#65763](https://github.com/kubernetes/kubernetes/pull/65763), [@x13n](https://github.com/x13n)) +- Go clients created from a kubeconfig that specifies a TokenFile now periodically reload the token from the specified file. ([#70606](https://github.com/kubernetes/kubernetes/pull/70606), [@mikedanese](https://github.com/mikedanese)) +- When `--rotate-server-certificates` is enabled, kubelet will no longer request a new certificate on startup if the current certificate on disk is satisfactory. 
([#69991](https://github.com/kubernetes/kubernetes/pull/69991), [@agunnerson-ibm](https://github.com/agunnerson-ibm)) +- Added dynamic audit configuration api ([#67547](https://github.com/kubernetes/kubernetes/pull/67547), [@pbarker](https://github.com/pbarker)) +- Added ability to control primary GID of containers through Pod Spec and PodSecurityPolicy ([#67802](https://github.com/kubernetes/kubernetes/pull/67802), [@krmayankk](https://github.com/krmayankk)) +- kube-apiserver: the `NodeRestriction` admission plugin now prevents kubelets from modifying `Node` labels prefixed with `node-restriction.kubernetes.io/`. The `node-restriction.kubernetes.io/` label prefix is reserved for cluster administrators to use for labeling `Node` objects to target workloads to nodes in a way that kubelets cannot modify or spoof. ([#68267](https://github.com/kubernetes/kubernetes/pull/68267), [@liggitt](https://github.com/liggitt)) + +### SIG Autoscaling + +- Updated Cluster Autoscaler version to 1.13.0. See the [Release Notes](https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.0) for more information. ([#71513](https://github.com/kubernetes/kubernetes/pull/71513), [@losipiuk](https://github.com/losipiuk)) + +### SIG AWS + +- `service.beta.kubernetes.io/aws-load-balancer-internal` now supports true and false values, previously it only supported non-empty strings ([#69436](https://github.com/kubernetes/kubernetes/pull/69436), [@mcrute](https://github.com/mcrute)) +- Added `service.beta.kubernetes.io/aws-load-balancer-security-groups` annotation to set the security groups to the AWS ELB to be the only ones specified in the annotation in case this is present (does not add `0.0.0.0/0`). ([#62774](https://github.com/kubernetes/kubernetes/pull/62774), [@Raffo](https://github.com/Raffo)) + +### SIG Azure + +- Ensured orphan public IPs on Azure deleted when service recreated with the same name. 
([#70463](https://github.com/kubernetes/kubernetes/pull/70463), [@feiskyer](https://github.com/feiskyer)) +- Improved Azure instance metadata handling by adding caches. ([#70353](https://github.com/kubernetes/kubernetes/pull/70353), [@feiskyer](https://github.com/feiskyer)) +- Corrected check for non-Azure managed nodes with the Azure cloud provider ([#70135](https://github.com/kubernetes/kubernetes/pull/70135), [@marc-sensenich](https://github.com/marc-sensenich)) +- Fixed azure disk attach/detach failed forever issue ([#71377](https://github.com/kubernetes/kubernetes/pull/71377), [@andyzhangx](https://github.com/andyzhangx)) +- DisksAreAttached --> getNodeDataDisks--> GetDataDisks --> getVirtualMachine --> vmCache.Get ([#71495](https://github.com/kubernetes/kubernetes/pull/71495), [@andyzhangx](https://github.com/andyzhangx)) + +### SIG CLI + +- `kubectl apply` can now change a deployment strategy from rollout to recreate without explicitly clearing the rollout-related fields ([#70436](https://github.com/kubernetes/kubernetes/pull/70436), [@liggitt](https://github.com/liggitt)) +- The `kubectl plugin list` command now displays discovered plugin paths in the same order as they are found in a user's PATH variable. 
([#70443](https://github.com/kubernetes/kubernetes/pull/70443), [@juanvallejo](https://github.com/juanvallejo)) +- `kubectl get` no longer exits before printing all of its results if an error is found ([#70311](https://github.com/kubernetes/kubernetes/pull/70311), [@juanvallejo](https://github.com/juanvallejo)) +- Fixed a runtime error occurring when sorting the output of `kubectl get` with empty results ([#70740](https://github.com/kubernetes/kubernetes/pull/70740), [@mfpierre](https://github.com/mfpierre)) +- kubectl: support multiple arguments for cordon/uncordon and drain ([#68655](https://github.com/kubernetes/kubernetes/pull/68655), [@goodluckbot](https://github.com/goodluckbot)) +- Fixed ability for admin/edit/view users to see controller revisions, needed for kubectl rollout commands ([#70699](https://github.com/kubernetes/kubernetes/pull/70699), [@liggitt](https://github.com/liggitt)) +- `kubectl rollout undo` now returns errors when attempting to rollback a deployment to a non-existent revision ([#70039](https://github.com/kubernetes/kubernetes/pull/70039), [@liggitt](https://github.com/liggitt)) +- kubectl run now generates apps/v1 deployments by default ([#71006](https://github.com/kubernetes/kubernetes/pull/71006), [@liggitt](https://github.com/liggitt)) +- The "kubectl cp" command now supports path shortcuts (../) in remote paths. ([#65189](https://github.com/kubernetes/kubernetes/pull/65189), [@juanvallejo](https://github.com/juanvallejo)) +- Fixed dry-run output in kubectl apply --prune ([#69344](https://github.com/kubernetes/kubernetes/pull/69344), [@zegl](https://github.com/zegl)) +- The kubectl wait command must handle when a watch returns an error vs closing by printing out the error and retrying the watch. 
([#69389](https://github.com/kubernetes/kubernetes/pull/69389), [@smarterclayton](https://github.com/smarterclayton)) +- kubectl: support multiple arguments for cordon/uncordon and drain ([#68655](https://github.com/kubernetes/kubernetes/pull/68655), [@goodluckbot](https://github.com/goodluckbot)) + +### SIG Cloud Provider + +- Added deprecation warning for all cloud providers ([#69171](https://github.com/kubernetes/kubernetes/pull/69171), [@andrewsykim](https://github.com/andrewsykim)) + +### SIG Cluster Lifecycle + +- kubeadm: Updates version of CoreDNS to 1.2.6 ([#70796](https://github.com/kubernetes/kubernetes/pull/70796), [@detiber](https://github.com/detiber)) +- kubeadm: Validate kubeconfig files in case of external CA mode. ([#70537](https://github.com/kubernetes/kubernetes/pull/70537), [@yagonobre](https://github.com/yagonobre)) +- kubeadm: The writable config file option for extra volumes is renamed to readOnly with a reversed meaning. With readOnly defaulted to false (as in pod specs). ([#70495](https://github.com/kubernetes/kubernetes/pull/70495), [@rosti](https://github.com/rosti)) +- kubeadm: Multiple API server endpoints support upon join is removed as it is now redundant. ([#69812](https://github.com/kubernetes/kubernetes/pull/69812), [@rosti](https://github.com/rosti)) +- `kubeadm reset` now cleans up custom etcd data path ([#70003](https://github.com/kubernetes/kubernetes/pull/70003), [@yagonobre](https://github.com/yagonobre)) +- kubeadm: Fixed unnecessary upgrades caused by undefined order of Volumes and VolumeMounts in manifests ([#70027](https://github.com/kubernetes/kubernetes/pull/70027), [@bart0sh](https://github.com/bart0sh)) +- kubeadm: Fixed node join taints. ([#69846](https://github.com/kubernetes/kubernetes/pull/69846), [@andrewrynhard](https://github.com/andrewrynhard)) +- Fixed cluster autoscaler addon permissions so it can access batch/job. 
([#69858](https://github.com/kubernetes/kubernetes/pull/69858), [@losipiuk](https://github.com/losipiuk)) +- kubeadm: JoinConfiguration now houses the discovery options in a nested Discovery structure, which in turn has a couple of other nested structures to house more specific options (BootstrapTokenDiscovery and FileDiscovery) ([#67763](https://github.com/kubernetes/kubernetes/pull/67763), [@rosti](https://github.com/rosti)) +- kubeadm: Fixed a possible scenario where kubeadm can pull much newer control-plane images ([#69301](https://github.com/kubernetes/kubernetes/pull/69301), [@neolit123](https://github.com/neolit123)) +- kubeadm now allows mixing of init/cluster and join configuration in a single YAML file (although a warning gets printed in this case). ([#69426](https://github.com/kubernetes/kubernetes/pull/69426), [@rosti](https://github.com/rosti)) +- kubeadm: Added a `v1beta1` API. ([#69289](https://github.com/kubernetes/kubernetes/pull/69289), [@fabriziopandini](https://github.com/fabriziopandini)) +- kubeadm init correctly uses `--node-name` and `--cri-socket` when `--config` option is also used ([#71323](https://github.com/kubernetes/kubernetes/pull/71323), [@bart0sh](https://github.com/bart0sh)) +- kubeadm: Always pass spec.nodeName as `--hostname-override` for kube-proxy ([#71283](https://github.com/kubernetes/kubernetes/pull/71283), [@Klaven](https://github.com/Klaven)) +- `kubeadm join` correctly uses `--node-name` and `--cri-socket` when `--config` option is also used ([#71270](https://github.com/kubernetes/kubernetes/pull/71270), [@bart0sh](https://github.com/bart0sh)) +- kubeadm now supports the `--image-repository` flag for customizing what registry to pull images from ([#71135](https://github.com/kubernetes/kubernetes/pull/71135), [@luxas](https://github.com/luxas)) +- kubeadm: The writable config file option for extra volumes is renamed to readOnly with a reversed meaning. With readOnly defaulted to false (as in pod specs). 
([#70495](https://github.com/kubernetes/kubernetes/pull/70495), [@rosti](https://github.com/rosti)) +- kubeadm: Multiple API server endpoints support upon join is removed as it is now redundant. ([#69812](https://github.com/kubernetes/kubernetes/pull/69812), [@rosti](https://github.com/rosti)) +- kubeadm: JoinConfiguration now houses the discovery options in a nested Discovery structure, which in turn has a couple of other nested structures to house more specific options (BootstrapTokenDiscovery and FileDiscovery) ([#67763](https://github.com/kubernetes/kubernetes/pull/67763), [@rosti](https://github.com/rosti)) +- kubeadm: Added a `v1beta1` API. ([#69289](https://github.com/kubernetes/kubernetes/pull/69289), [@fabriziopandini](https://github.com/fabriziopandini)) +- kubeadm: Use `advertise-client-urls` instead of `listen-client-urls` as the `etcd-servers` option for apiserver. ([#69827](https://github.com/kubernetes/kubernetes/pull/69827), [@tomkukral](https://github.com/tomkukral)) +- Kubeadm now respects the custom image registry configuration across joins and upgrades. Kubeadm passes the custom registry to the kubelet for a custom pause container. ([#70603](https://github.com/kubernetes/kubernetes/pull/70603), [@chuckha](https://github.com/chuckha)) +- `kubeadm reset` now outputs instructions about manual iptables rules cleanup. ([#70874](https://github.com/kubernetes/kubernetes/pull/70874), [@rdodev](https://github.com/rdodev)) +- kubeadm: remove the AuditPolicyConfiguration feature gate ([#70807](https://github.com/kubernetes/kubernetes/pull/70807), [@Klaven](https://github.com/Klaven)) +- kubeadm pre-pulls Etcd image only if external Etcd is not used. ([#70743](https://github.com/kubernetes/kubernetes/pull/70743), [@bart0sh](https://github.com/bart0sh)) +- kubeadm: UnifiedControlPlaneImage is replaced by UseHyperKubeImage boolean value. 
([#70793](https://github.com/kubernetes/kubernetes/pull/70793), [@rosti](https://github.com/rosti)) +- For kube-up and derived configurations, CoreDNS will honor master taints, for consistency with kube-dns behavior. ([#70868](https://github.com/kubernetes/kubernetes/pull/70868), [@justinsb](https://github.com/justinsb)) +- Recognize newer docker versions without -ce/-ee suffix: 18.09.0 ([#71001](https://github.com/kubernetes/kubernetes/pull/71001), [@thomas-riccardi](https://github.com/thomas-riccardi)) +- Any external provider should be aware the cloud-provider interface should be imported from `k8s.io/cloud-provider`. ([#68310](https://github.com/kubernetes/kubernetes/pull/68310), [@cheftako](https://github.com/cheftako)) +- Fixed 'kubeadm upgrade' infinite loop waiting for pod restart ([#69886](https://github.com/kubernetes/kubernetes/pull/69886), [@bart0sh](https://github.com/bart0sh)) +- Bumped addon-manager to v8.8 ([#69337](https://github.com/kubernetes/kubernetes/pull/69337), [@MrHohn](https://github.com/MrHohn)) +- GCE: Filter out spammy audit logs from cluster autoscaler. ([#70696](https://github.com/kubernetes/kubernetes/pull/70696), [@loburm](https://github.com/loburm)) +- GCE: Enable by default audit logging truncating backend. ([#68288](https://github.com/kubernetes/kubernetes/pull/68288), [@loburm](https://github.com/loburm)) +- Bumped cluster-proportional-autoscaler to 1.3.0 ([#69338](https://github.com/kubernetes/kubernetes/pull/69338), [@MrHohn](https://github.com/MrHohn)) +- Updated defaultbackend to v1.5 ([#69334](https://github.com/kubernetes/kubernetes/pull/69334), [@bowei](https://github.com/bowei)) + +### SIG GCP + +- Added tolerations for Stackdriver Logging and Metadata Agents. ([#69737](https://github.com/kubernetes/kubernetes/pull/69737), [@qingling128](https://github.com/qingling128)) +- Enabled insertId generation, and updated Stackdriver Logging Agent image to 0.5-1.5.36-1-k8s. This helps reduce log duplication and guarantees log order. 
([#68920](https://github.com/kubernetes/kubernetes/pull/68920), [@qingling128](https://github.com/qingling128)) +- Updated crictl to v1.12.0 ([#69033](https://github.com/kubernetes/kubernetes/pull/69033), [@feiskyer](https://github.com/feiskyer)) + +### SIG Network + +- Corrected family type (inet6) for ipsets in ipv6-only clusters ([#68436](https://github.com/kubernetes/kubernetes/pull/68436), [@uablrek](https://github.com/uablrek)) +- kube-proxy argument `hostname-override` can be used to override hostname defined in the configuration file ([#69340](https://github.com/kubernetes/kubernetes/pull/69340), [@stevesloka](https://github.com/stevesloka)) +- CoreDNS correctly implements DNS spec for Services with externalNames that look like IP addresses. Kube-dns does not follow the spec for the same case, resulting in a behavior change when moving from Kube-dns to CoreDNS. See: [coredns/coredns#2324](https://github.com/coredns/coredns/issues/2324) +- IPVS proxier now set net/ipv4/vs/conn_reuse_mode to 0 by default, which will highly improve IPVS proxier performance. ([#71114](https://github.com/kubernetes/kubernetes/pull/71114), [@Lion-Wei](https://github.com/Lion-Wei)) +- CoreDNS is now version 1.2.6 ([#70799](https://github.com/kubernetes/kubernetes/pull/70799), [@rajansandeep](https://github.com/rajansandeep)) +- Addon configuration is introduced in the kubeadm config API, while feature flag CoreDNS is now deprecated. ([#70024](https://github.com/kubernetes/kubernetes/pull/70024), [@fabriziopandini](https://github.com/fabriziopandini)) + +### SIG Node + +- Fixed a bug in previous releases where a pod could be placed inside another pod's cgroup when specifying --cgroup-root ([#70678](https://github.com/kubernetes/kubernetes/pull/70678), [@dashpole](https://github.com/dashpole)) +- Optimized calculating stats when only CPU and Memory stats are returned from Kubelet stats/summary http endpoint. 
([#68841](https://github.com/kubernetes/kubernetes/pull/68841), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) +- kubelet now supports `log-file` option to write logs directly to a specific file ([#70917](https://github.com/kubernetes/kubernetes/pull/70917), [@dims](https://github.com/dims)) +- Do not detach volume if mount in progress ([#71145](https://github.com/kubernetes/kubernetes/pull/71145), [@gnufied](https://github.com/gnufied)) +- The runtimeHandler field on the RuntimeClass resource now accepts the empty string. ([#69550](https://github.com/kubernetes/kubernetes/pull/69550), [@tallclair](https://github.com/tallclair)) +- kube-apiserver: fixes `procMount` field incorrectly being marked as required in openapi schema ([#69694](https://github.com/kubernetes/kubernetes/pull/69694), [@jessfraz](https://github.com/jessfraz)) + +### SIG OpenStack + +- Fixed cloud-controller-manager crash when using OpenStack provider and PersistentVolume initializing controller ([#70459](https://github.com/kubernetes/kubernetes/pull/70459), [@mvladev](https://github.com/mvladev)) + +### SIG Release + +- Use debian-base instead of busybox as base image for server images ([#70245](https://github.com/kubernetes/kubernetes/pull/70245), [@ixdy](https://github.com/ixdy)) +- Images for cloud-controller-manager, kube-apiserver, kube-controller-manager, and kube-scheduler now contain a minimal /etc/nsswitch.conf and should respect /etc/hosts for lookups ([#69238](https://github.com/kubernetes/kubernetes/pull/69238), [@BenTheElder](https://github.com/BenTheElder)) + +### SIG Scheduling + +- Added metrics for volume scheduling operations ([#59529](https://github.com/kubernetes/kubernetes/pull/59529), [@wackxu](https://github.com/wackxu)) +- Improved memory use and performance when processing large numbers of pods containing tolerations ([#65350](https://github.com/kubernetes/kubernetes/pull/65350), [@liggitt](https://github.com/liggitt)) +- Fixed a bug in the scheduler 
that could cause the scheduler to go to an infinite loop when all nodes in a zone are removed. ([#69758](https://github.com/kubernetes/kubernetes/pull/69758), [@bsalamat](https://github.com/bsalamat)) +- Clear pod binding cache on bind error to make sure stale pod binding cache will not be used. ([#71212](https://github.com/kubernetes/kubernetes/pull/71212), [@cofyc](https://github.com/cofyc)) +- Fixed a scheduler panic due to internal cache inconsistency ([#71063](https://github.com/kubernetes/kubernetes/pull/71063), [@Huang-Wei](https://github.com/Huang-Wei)) +- Report kube-scheduler unhealthy if leader election is deadlocked. ([#71085](https://github.com/kubernetes/kubernetes/pull/71085), [@bsalamat](https://github.com/bsalamat)) +- Fixed a potential bug that scheduler preempts unnecessary pods. ([#70898](https://github.com/kubernetes/kubernetes/pull/70898), [@Huang-Wei](https://github.com/Huang-Wei)) + +### SIG Storage + +- Fixed CSI volume limits not showing up in node's capacity and allocatable ([#70540](https://github.com/kubernetes/kubernetes/pull/70540), [@gnufied](https://github.com/gnufied)) +- CSI drivers now have access to mountOptions defined on the storage class when attaching volumes. ([#67898](https://github.com/kubernetes/kubernetes/pull/67898), [@bswartz](https://github.com/bswartz)) +- change default azure file mount permission to 0777 ([#69854](https://github.com/kubernetes/kubernetes/pull/69854), [@andyzhangx](https://github.com/andyzhangx)) +- Fixed subpath in containerized kubelet. ([#69565](https://github.com/kubernetes/kubernetes/pull/69565), [@jsafrane](https://github.com/jsafrane)) +- Fixed panic on iSCSI volume tear down. ([#69140](https://github.com/kubernetes/kubernetes/pull/69140), [@jsafrane](https://github.com/jsafrane)) +- CSIPersistentVolume feature, i.e. PersistentVolumes with CSIPersistentVolumeSource, is GA. 
([#69929](https://github.com/kubernetes/kubernetes/pull/69929), [@jsafrane](https://github.com/jsafrane)) +- Fixed CSIDriver API object to allow missing fields. ([#69331](https://github.com/kubernetes/kubernetes/pull/69331), [@jsafrane](https://github.com/jsafrane)) +- Flex volume plugins now support expandvolume (to increase underlying volume capacity) and expandfs (resize filesystem) commands that Flex plugin authors can implement to support expanding in use Flex PersistentVolumes ([#67851](https://github.com/kubernetes/kubernetes/pull/67851), [@aniket-s-kulkarni](https://github.com/aniket-s-kulkarni)) +- Enabled AttachVolumeLimit feature ([#69225](https://github.com/kubernetes/kubernetes/pull/69225), [@gnufied](https://github.com/gnufied)) +- The default storage class annotation for the storage addons has been changed to use the GA variant ([#68345](https://github.com/kubernetes/kubernetes/pull/68345), [@smelchior](https://github.com/smelchior)) +- GlusterFS PersistentVolumes sources can now reference endpoints in any namespace using the `spec.glusterfs.endpointsNamespace` field. Ensure all kubelets are upgraded to 1.13+ before using this capability. ([#60195](https://github.com/kubernetes/kubernetes/pull/60195), [@humblec](https://github.com/humblec)) +- Fixed GetVolumeLimits log flushing issue ([#69558](https://github.com/kubernetes/kubernetes/pull/69558), [@andyzhangx](https://github.com/andyzhangx)) +- The `MountPropagation` feature is unconditionally enabled in v1.13, and can no longer be disabled. ([#68230](https://github.com/kubernetes/kubernetes/pull/68230), [@bertinatto](https://github.com/bertinatto)) + +### SIG Windows + +- `kubelet --system-reserved` and `--kube-reserved` are supported now on Windows nodes ([#69960](https://github.com/kubernetes/kubernetes/pull/69960), [@feiskyer](https://github.com/feiskyer)) +- The Windows runtime endpoint is now switched to `npipe:////./pipe/dockershim` from `tcp://localhost:3735`. 
([#69516](https://github.com/kubernetes/kubernetes/pull/69516), [@feiskyer](https://github.com/feiskyer)) +- Fixed service issues with named targetPort for Windows ([#70076](https://github.com/kubernetes/kubernetes/pull/70076), [@feiskyer](https://github.com/feiskyer)) +- Handle Windows named pipes in host mounts. ([#69484](https://github.com/kubernetes/kubernetes/pull/69484), [@ddebroy](https://github.com/ddebroy)) +- Fixed inconsistency in windows kernel proxy when updating HNS policy. ([#68923](https://github.com/kubernetes/kubernetes/pull/68923), [@delulu](https://github.com/delulu)) + +## External Dependencies + +- Default etcd server is unchanged at v3.2.24 since Kubernetes 1.12. ([#68318](https://github.com/kubernetes/kubernetes/pull/68318)) +- The list of validated docker versions remains unchanged at 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06 since Kubernetes 1.12. ([#68495](https://github.com/kubernetes/kubernetes/pull/68495)) +- The default Go version was updated to 1.11.2. ([#70665](https://github.com/kubernetes/kubernetes/pull/70665)) +- The minimum supported Go version was updated to 1.11.2 ([#69386](https://github.com/kubernetes/kubernetes/pull/69386)) +- CNI is unchanged at v0.6.0 since Kubernetes 1.10 ([#51250](https://github.com/kubernetes/kubernetes/pull/51250)) +- CSI is updated to 1.0.0. Pre-1.0.0 API support is now deprecated. ([#71020](https://github.com/kubernetes/kubernetes/pull/71020)) +- The dashboard add-on has been updated to v1.10.0. 
([#68450](https://github.com/kubernetes/kubernetes/pull/68450)) +- Heapster remains at v1.6.0-beta, but is now retired in Kubernetes 1.13 ([#67074](https://github.com/kubernetes/kubernetes/pull/67074)) +- Cluster Autoscaler has been upgraded to v1.13.0 ([#71513](https://github.com/kubernetes/kubernetes/pull/71513)) +- kube-dns is unchanged at v1.14.13 since Kubernetes 1.12 ([#68900](https://github.com/kubernetes/kubernetes/pull/68900)) +- Influxdb is unchanged at v1.3.3 since Kubernetes 1.10 ([#53319](https://github.com/kubernetes/kubernetes/pull/53319)) +- Grafana is unchanged at v4.4.3 since Kubernetes 1.10 ([#53319](https://github.com/kubernetes/kubernetes/pull/53319)) +- Kibana has been upgraded to v6.3.2. ([#67582](https://github.com/kubernetes/kubernetes/pull/67582)) +- CAdvisor has been updated to v0.32.0 ([#70964](https://github.com/kubernetes/kubernetes/pull/70964)) +- fluentd-gcp-scaler has been updated to v0.5.0 ([#68837](https://github.com/kubernetes/kubernetes/pull/68837)) +- Fluentd in fluentd-elasticsearch is unchanged at v1.2.4 since Kubernetes 1.11 ([#67434](https://github.com/kubernetes/kubernetes/pull/67434)) +- fluentd-elasticsearch has been updated to v2.2.1 ([#68012](https://github.com/kubernetes/kubernetes/pull/68012)) +- The fluent-plugin-kubernetes_metadata_filter plugin in fluentd-elasticsearch is unchanged at 2.0.0 since Kubernetes 1.12 ([#67544](https://github.com/kubernetes/kubernetes/pull/67544)) +- fluentd-gcp has been updated to v3.2.0 ([#70954](https://github.com/kubernetes/kubernetes/pull/70954)) +- OIDC authentication is unchanged at coreos/go-oidc v2 since Kubernetes 1.10 ([#58544](https://github.com/kubernetes/kubernetes/pull/58544)) +- Calico was updated to v3.3.1 ([#70932](https://github.com/kubernetes/kubernetes/pull/70932)) +- Upgraded crictl on GCE to v1.12.0 ([#69033](https://github.com/kubernetes/kubernetes/pull/69033)) +- CoreDNS has been updated to v1.2.6 ([#70799](https://github.com/kubernetes/kubernetes/pull/70799)) 
+- event-exporter has been updated to v0.2.3 ([#67691](https://github.com/kubernetes/kubernetes/pull/67691)) +- Es-image remains unchanged at Elasticsearch 6.3.2 since Kubernetes 1.12 ([#67484](https://github.com/kubernetes/kubernetes/pull/67484)) +- metrics-server remains unchanged at v0.3.1 since Kubernetes 1.12 ([#68746](https://github.com/kubernetes/kubernetes/pull/68746)) +- GLBC remains unchanged at v1.2.3 since Kubernetes 1.12 ([#66793](https://github.com/kubernetes/kubernetes/pull/66793)) +- Ingress-gce remains unchanged at v1.2.3 since Kubernetes 1.12 ([#66793](https://github.com/kubernetes/kubernetes/pull/66793)) +- ip-masq-agen remains unchanged at v2.1.1 since Kubernetes 1.12 ([#67916](https://github.com/kubernetes/kubernetes/pull/67916)) + +# v1.13.0-rc.2 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.13.0-rc.2 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes.tar.gz) | `12fbaf943ae72711cd93c9955719ec1773a229dbb8f86a44fcda179229beb82add4dc1a54ceb50b9f48fde48e2464ed0cd4b2e57d9689a7ae784cb052beb6751` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-src.tar.gz) | `8e94f0fe73909610e85c201bb1ba4f66fd55ca2b4ded77217a4dfad2874d402cc1cc94203ecc195f909126c186701e5e1e62890ad288895493a1759f88a190d0` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-darwin-386.tar.gz) | `ac555f5d1e6b88fa4de1e06e0a1ebd372582f97c526c938334a8c63fbf17545607efbba9975d1767e147113e551e986d6523f6985ea41236cfbf7949df31f016` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-darwin-amd64.tar.gz) | `2eae428a0e4bcb2237343d7ac1e431ccfc1f7037622bb3131ad8d48a3af6f5ed34be899ec1ec32af7eb7d411cb0cda02a2413405479722ab868cdc816726c9df` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-386.tar.gz) | 
`89e671679b4516f184f7fd5ea0fe2a9ab0245fab34447625786bf55841223124527d3aa2ee6fa2474333f37eea4e9a5ba6f3f4dc3698907fd24bedf522f53b40` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-amd64.tar.gz) | `61f6513722e9c485300b822d6fc5998927bbffa18862d2d3f177a7c7cc0ee56c51ec169e3c8239e352c022094bb02124ed060d7d5c3cec9b67aae20ffd42f387` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-arm.tar.gz) | `ef0e5fd4bf2074dfd3cf54d45307550273695906baca3533a9d23424e7b693d706f6d1d3a09a34e2d1f84d9eddc6b62d96e5190b8c7145919e93f0ae75ec4d06` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-arm64.tar.gz) | `d34bb9ce9bfe2a5375fd58920e63b4eef818348719dba460f35838433af57a1a23fa659e53de52c8174fa212c94c4196ac5a02ce02ef714860488c77563b5821` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-ppc64le.tar.gz) | `4dc4e4a5e166e63360ba86e1278bbe75212ac7c3f60ba30425a1c5654bf5a9b1164543fdc23d7dfd9d3aea7be38544c8dc535459e96c062db631e58c5c628762` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-s390x.tar.gz) | `d27675f4753469cd5e31faed13a1ea9654c25d38b0d96c1340215fd231050ffc66dc40c5103f8377339bacf00f1c99d386fe9c21fc68c5a21c10667f773d9d4b` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-windows-386.tar.gz) | `9d6e6de2d4a55eaeebd7fa6b861548e0768381d50838430722b56636428a3417b8f2bbc953bc365294a857d8f5b51d90807e5eafe874f37d9b726f48b5d04197` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-windows-amd64.tar.gz) | `30b2da5c015ef88b9efcf90bffe0498d367df7c126b65f2e878af263c5d62b8c93792dbf20511d0ff034c7a9e2c3fc93931860e1254ed158eddec34f407b9005` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- 
+[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-server-linux-amd64.tar.gz) | `8180f2b788249fe65f7f1d3ee431ac758ede29a6349db312afbee080ff2c24586fc468f11a9cbcb8d22842739974f29e10793778f5fd5c55d10129e97a1efce3` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-server-linux-arm.tar.gz) | `e9165284a0b82a9ab88dad05f43bfe1bebecad3bb1c7118475c3426e0b6f9f91d340e1e6223d81df9337ab4cc9a96708443c025030127acf88437f0c327b750b` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-server-linux-arm64.tar.gz) | `03797c021ebed3b08835e72eed405c57aaacce972bbbbf88bf49310efbf8c7242f2f223d73b5d2ed4c21e5196e6e5fb7b2b811f08607db6dbe98f869bf28bedb` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-server-linux-ppc64le.tar.gz) | `ceb49af22e3b518f3ba27c1e7de28e577e2735175e84a6d203f1f8766eceaa7c0424746ff71498d7847e98f538af5663b16cc306cb0adbb006d5d869766dfb9b` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-server-linux-s390x.tar.gz) | `bee4752e8a52e217ae1ffcfbc263453c724de684b4d463d5ddb24a3a30a67fc8f78e6c0a8154c6b6581d17f1e168903bc18d0e56f02fce5933f673bb4c74a8cf` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-linux-amd64.tar.gz) | `b368989bbb8ab4d29b51d5d4d71d073b0ceb39614c944859dcd14c3303c31475850f7012deaa8d5ba9c17edd728bce536fbd523ae7defc74a30f0878f05497bf` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-linux-arm.tar.gz) | `404b7b74a1e0d0fed9088a7e9461e02cfd9a6992c554baa125b7a361a6baa03d1e4622fbc4ec51836f00a7ac4f90167f345307678527f5781e06acdf526b9a45` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-linux-arm64.tar.gz) | `fa531b1675a778c572a2175fb1bed00e78dc589f638f2096b3b5c9d3d691a5668787a43d69898678abd70c7b949e05cfebfb0783c0144a66bdff61fed6094582` 
+[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-linux-ppc64le.tar.gz) | `a7ecc1f63e632c1b4f9b312babd6882ec966420bf4f8346edf80495fcf860d912729072c79d23cc071a07239783409b02c1f4a716a24e2597f2b490c9b3bb5b3` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-linux-s390x.tar.gz) | `a7171ed95de943a0ac5a32da4458e8d4366eb1fadbe426bebc371d2bb6536636b14db9d2cd03952258b3cb1b99fdca2db07947b028cc6c7bb92f4281ba6f62f2` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-windows-amd64.tar.gz) | `8a3a71d142b99fb200c4c1c9c0fa4dc6a3b64a0b506dc37dc3d832a94a791619a09ae4b2c6f73802f6833234570633974547f7700c8bb6de71d91ba2c4ac4b54` + +## Changelog since v1.13.0-rc.1 + +### Other notable changes + +* Update Cluster Autoscaler version to 1.13.0. Release notes: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.0 ([#71513](https://github.com/kubernetes/kubernetes/pull/71513), [@losipiuk](https://github.com/losipiuk)) +* fix detach azure disk issue due to dirty cache ([#71495](https://github.com/kubernetes/kubernetes/pull/71495), [@andyzhangx](https://github.com/andyzhangx)) + + + +# v1.13.0-rc.1 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.13.0-rc.1 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes.tar.gz) | `1c047e4edcf3553a568679e6e5083988b06df9d938f299a9193c72ad96a9c439a1f47f98b86f75d94746e8c1ae363b7a3de3c29dbdf7585b5e5e67b95f309d4a` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-src.tar.gz) | `d2fd47c38abd29a2037b9e2a3a958ec250e2c6ae77532f6e935a6422bd626485fd720932b18fe2fdfcc7b17c6014a9da08cd9e6f9272f19f666ec52ffc02b564` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-client-darwin-386.tar.gz) | 
`44d0733359be5036953775e12fc1723e4c64452a24a8c3b522c8a624e0a132cf61483a120cafebe1370939b38ddf1809969dfc0daf0c087ce8a888aa98f2fa6f` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-client-darwin-amd64.tar.gz) | `2acd37ed234271b0ff9c30273261e4b127309a1bc91a006b7a07e1a948703fa550699cd7f44dceb4e7cc6be139f80785853ce4dedb3c3d3f0df85598d0488d56` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-client-linux-386.tar.gz) | `5fe07ea2f776086df0e9447b7e6b0863c5b3af71f5aff8e302087e242d78613278023a169f211be96feab5109d801c9e4f427a911221d039e4d9cadec3086ebf` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-client-linux-amd64.tar.gz) | `7541d5850d74156862e5fe00817bd954d2b49b2c0cf15abe5cde34406928b8ca34b6907eea51e79e005156964ea1269102f2663e667ccbb4223ea12edfc97c20` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-client-linux-arm.tar.gz) | `122121d3e469b6e33cc3fd910b32a5a94b9d3479f0367c54fbc4e7f13df7b097c061b0624b36c0e59f9a35dda7d021f04d400506e6f40eff657672ee53b91a69` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-client-linux-arm64.tar.gz) | `5e3d415db4239f27461c4ea404903cfc762084d5c1e84f9ed8bc0325d7fa845ac540a279e3bd67ac80d00fcad4860398166f55f76ba22c1060e0bc1c867b2464` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-client-linux-ppc64le.tar.gz) | `8651f4161569913b616695bdd1a41c4b177cbfb4773fbca649b3e97957f6c5f46f4fa84bfa92ba24abc34b90cc9543d3c0707962d28d701ef784c764ef49f407` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-client-linux-s390x.tar.gz) | `920b81f6bbc7e7d4fa2f9c61fbc6f529621f2f134dbbb0f407866ffd0ec47791484187c609cca3b615034a5393869ae8f156a7bd0001d0ef59f195d8aab7229d` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-client-windows-386.tar.gz) | 
`0d49277cb7c36e5538d4c1c0fd6e6a69da7cd73c226f5869b29fad1e5b9bf434ffc8423b72d807df67b6674541a370a5235881bccff1b9f390f175064020453a` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-client-windows-amd64.tar.gz) | `34ae587e2d439f925d1e324d2bbff3a751bb73b18e98b13c93e5742e7e16c00b4d9956b91721b4e06a00087dc00862248e6de167683a6bd1ccd14b1a6dcef753` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-server-linux-amd64.tar.gz) | `7030ef7463bef0871e524a5233d23c5f8aee18ac92e96555910ddc7a891772d451dac08b583f391132c654eaaea788f3bf29fb510a2f6f3b24b0ac79ca669f77` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-server-linux-arm.tar.gz) | `ccd1f413ad357581a904d1ff67f3e376be7882bd72efb13657f8aa1191c4481691743016a1385b777b5e62fe9854c1695ffa847c3b4534459317d0d5b5baaf76` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-server-linux-arm64.tar.gz) | `ff589f5b6c56713818edda8ae9b39b17dfbf34e881c09736f722de5d70e6dd1508b5fefc60f40547dfd4fddb32ddce2a4470e1d240b315db5840a0fba957d553` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-server-linux-ppc64le.tar.gz) | `f748985751bf403bc7b1f9160ce937cd2915552b27c3c79764a66789dc39ef9e3069e6f25d21e15bfaf81c535e3ee3b195eb636965456467ee56c0167c526129` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-server-linux-s390x.tar.gz) | `b3b0075948d72784defe94073dff251b79083aa46b4f29419026757665cac554356486948a41b59293904238651a733747a0e271f43a72228c6c83cf8f5634a7` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-node-linux-amd64.tar.gz) | `01907a104c043607985053571183b7bdccf655f847d1dd9d8991cd2c464ddf9953f25cacb255be3067c1b65f6168fe92a90162636e6c6b6ec33340926d537959` 
+[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-node-linux-arm.tar.gz) | `dbf1801c456312698253767dd36b186fb4e503a03454cd16bba68a1ede9d29e14939591eb39516129bc8c88e64fba2a287ae6447b7e4ff4afcecd1fb50713403` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-node-linux-arm64.tar.gz) | `15f3259370f1419fcc372a28faa9a3caae5f2c89ee76286c14ea62d612fdca94ac7358a3cd76877736389080d28ba65237fc0aeffed2050bac4e342877351e51` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-node-linux-ppc64le.tar.gz) | `00dc7f5bd40d045baeb72d5dcfb302b8566aacc23cd7de1b877724e1160ee1608b3b121358d2c3b081d06deb1a8107d0437d3d1b20df88e4adcfd5f4a05964ee` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-node-linux-s390x.tar.gz) | `2b80e4dffa0b8bdc0305d1263c06320918541f3a7b6519123752b89be335a2c48965b7d16d814ffc02e304e9cf932db0c780fc316c99a080bebd880d55e3c939` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.1/kubernetes-node-windows-amd64.tar.gz) | `600b442a1665e39621fce03ad07b162e2353cc8bc982cad849dab7e1c2db34bde675ef12a4907a75f2ba82e62ae3126e189b3d69db7fcab71548bf6940351dda` + +## Changelog since v1.13.0-beta.2 + +### Other notable changes + +* CVE-2018-1002105: Fix critical security issue in kube-apiserver upgrade request proxy handler ([#71411](https://github.com/kubernetes/kubernetes/issues/71411), [@liggitt](https://github.com/liggitt)) +* Update Cluster Autoscaler version to 1.13.0-rc.2. Release notes: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.0-rc.2 ([#71452](https://github.com/kubernetes/kubernetes/pull/71452), [@losipiuk](https://github.com/losipiuk)) +* Upgrade Stackdriver Logging Agent addon image to 0.6-1.6.0-1 to use Fluentd v1.2. This provides nanoseconds timestamp granularity for logs. 
([#70954](https://github.com/kubernetes/kubernetes/pull/70954), [@qingling128](https://github.com/qingling128)) +* fixes a runtime error occurring when sorting the output of `kubectl get` with empty results ([#70740](https://github.com/kubernetes/kubernetes/pull/70740), [@mfpierre](https://github.com/mfpierre)) +* fix azure disk attach/detach failed forever issue ([#71377](https://github.com/kubernetes/kubernetes/pull/71377), [@andyzhangx](https://github.com/andyzhangx)) +* Do not detach volume if mount in progress ([#71145](https://github.com/kubernetes/kubernetes/pull/71145), [@gnufied](https://github.com/gnufied)) + + + +# v1.13.0-beta.2 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.13.0-beta.2 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes.tar.gz) | `e8607473e2b3946a3655fa895c2b7dee74818b4c2701047fee5343ab6b2f2aa3d97b19b11c7e7aeaca322a95bf99cbb5a7dafca187922fd40eaf24daaaf3bc8d` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-src.tar.gz) | `6ca15ad729a82b41587e1dbbd4e9ad5447e202e8e7ee8c01c411090031ee3feb83f0cc65e211e8634a01e7c52d5f1f7b47cd2ac601708542227853f312401e8f` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-client-darwin-386.tar.gz) | `5727218280ea7c68350aa5cf04e3d3c346f97d462e3f60f5196e27358f71841e19523b277a5b8fe9cea4b8fa323c54a820ae1956937922bcb24524612435e699` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-client-darwin-amd64.tar.gz) | `3e3975a41da08135dc654a40acb86ce862b1f56a9361e0c38c9c99c5b5bcad970f2271ae9a17e03c3d6e13ed03176e5e80313b39a8a02680a3b6d806e33f3a1b` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-client-linux-386.tar.gz) | `26cfa99fbe09b20ebe3d2aebb4d08f0f9f2661d5533b94daf6c8354701b1e4ddb8981c10323073c0d06e52eeb0f68839726b684e4e2222b1be7b940ff6017c72` 
+[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-client-linux-amd64.tar.gz) | `42204953b02af81bb5f695c957aca9fa382609447ada5e3a9701da3e8bbd54923084e0b28dd5be455f39ec0dd5c4bf4e81704542d1ce64d7292c17f0a9b04a32` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-client-linux-arm.tar.gz) | `c680c94699b0b319b654a4c1c0a9b7fc387c44fb22744f30049142b17c3fabd3ba5358904cf8d5ccb077d0fb96d0360d222616980a13f91fc4a64ba2afce35b8` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-client-linux-arm64.tar.gz) | `aa997b3428979ba2652fd251c4c5ece87043472ebe2ee15d8a179e69ddbefd47e8030e9392c4f6659b8207fcfb45b2effb69d9736af7c6c82f4217c5e3ac89e1` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-client-linux-ppc64le.tar.gz) | `684dfc462d84d3902e322535997e57f7874003ab17c41508c057bc7c6220062cf57d0486086d28940d9b4c0e8b5ebead5d4a64ad6e23895ecd2f8841844a8527` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-client-linux-s390x.tar.gz) | `ff98b3a23dfe436a12843eb388be9568cbc29c9328648a1d166518aac40841bd8d855916918259cd92a0cc465e376ab1cb4c7b2b92c1eee454a76495772dc7e1` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-client-windows-386.tar.gz) | `6897a0f59fb409526dae9c86680702f3d2a1dc68d145504ed2e98b05d8f1dcc9b6a977c1af17277775b64501c86d5392e2f052aded8aec0c0640d6e78f609b87` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-client-windows-amd64.tar.gz) | `6ed67eecb2b79ace8d428cbd4d07ef7d52ba4e5b3b44eb59d46aff99a7a862f158573b4c2678cbdd31ba060a0acd695fa2d8a29ad0c4e22516622c23d017c5cb` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-server-linux-amd64.tar.gz) | 
`351292b217c1c49b5c0241da11b4be0929a5d1645bec7dd05051930df8a70090b130d3ceef2482657db16dd6e4f71013075bcd727e741b497143bc6db67c134a` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-server-linux-arm.tar.gz) | `88f166a7b5a3f9d9c19a5b911adb6e8e4cac1a3323b83d681f13aaf7bb285b0d016b147b4168c886efeccb3d9052c7512c1f61f7a0f59e2ba324e2827f802712` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-server-linux-arm64.tar.gz) | `fb4868a939eca18de17e0b606d1ab127712e277e01c02ffa96138a53973cd583bfc28cf9c2967906896740665fdb02ed53f01ef4341cad9718a8770779d99431` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-server-linux-ppc64le.tar.gz) | `47a4e8e96c1e8a8cc37eabd19194b9d174fa93c3feaf1384895f89c5c6836511eb9f4ff3c91dd84c05398b7d0ce2d49fd5b43ddb518b7b9ed706f224a48b18fa` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-server-linux-s390x.tar.gz) | `4e0823d1da55a71f001fcb07511a7b3416641ea93bfbd56b1e1e435c0a78bafbcecc873ba43808985b66b01464cbf9b60e9ad057ec78fea5080de8a5f7684bab` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-node-linux-amd64.tar.gz) | `e21964063b80f52e387cd35826f3081ad0a3b62608d182e008b8b76f572442905e4b0625839d3ff28a353f1d686f8dbd15104e6b27126e3c6a579704b2106154` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-node-linux-arm.tar.gz) | `cb665911af59a1cf86e5d66a4cdc134dc412e9e479dd89fa0bbbaeb8324eb87d090ffb0985e31bb12b5e063bfe8c045ac797931cfb856287b05f63b53b26a524` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-node-linux-arm64.tar.gz) | `c172126829aea38e2238af6b62035abad6ed08d041175b0bf99792b7c608a0b27dd7f80b5ad301843cbdfee7ed2825aee9ea52d69752759a79e1148529ad1999` 
+[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-node-linux-ppc64le.tar.gz) | `0367940078ea9b4d46778b8406840fd2925f612304b5fa5b675fc07d5457bea524ebaf0378691af27f97d0c0fe39fffc6ad75f6db10139f377e52d0d3888252a` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-node-linux-s390x.tar.gz) | `74382ed862ae099b91ce6056b85b7ee4f075fbdb4e737a8448c92e20fe3a0717047a138c23e13b0a8bda3e457f4299f412ca31768e8c87e57c9faf95b7f9adda` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0-beta.2/kubernetes-node-windows-amd64.tar.gz) | `9164c4eae920c727965caae046e1b2daabf4822e2dee2260697b22e5208a0d8c6e7ce152a5df7852e8203d4771e6dc6bf9f63159324facb1ab81ea8f212b55fb` + +## Changelog since v1.13.0-beta.1 + +### Other notable changes + +* Fix missing flags in kube-apiserver --help. ([#70204](https://github.com/kubernetes/kubernetes/pull/70204), [@imjching](https://github.com/imjching)) +* kubeadm init correctly uses --node-name and --cri-socket when --config option is also used ([#71323](https://github.com/kubernetes/kubernetes/pull/71323), [@bart0sh](https://github.com/bart0sh)) +* API server flag `--experimental-encryption-provider-config` was renamed to `--encryption-provider-config`. The old flag is accepted with a warning but will be removed in 1.14. ([#71206](https://github.com/kubernetes/kubernetes/pull/71206), [@stlaz](https://github.com/stlaz)) +* Fix missing flags in *-controller-manager --help. ([#71298](https://github.com/kubernetes/kubernetes/pull/71298), [@stewart-yu](https://github.com/stewart-yu)) +* Clear pod binding cache on bind error to make sure stale pod binding cache will not be used. 
([#71212](https://github.com/kubernetes/kubernetes/pull/71212), [@cofyc](https://github.com/cofyc)) +* kubeadm: always pass spec.nodeName as --hostname-override for kube-proxy ([#71283](https://github.com/kubernetes/kubernetes/pull/71283), [@Klaven](https://github.com/Klaven)) +* kubeadm join correctly uses --node-name and --cri-socket when --config option is also used ([#71270](https://github.com/kubernetes/kubernetes/pull/71270), [@bart0sh](https://github.com/bart0sh)) +* apiserver can be configured to reject requests that cannot be audit-logged. ([#65763](https://github.com/kubernetes/kubernetes/pull/65763), [@x13n](https://github.com/x13n)) +* Kubelet Device Plugin Registration directory changed from `{kubelet_root_dir}/plugins/` to `{kubelet_root_dir}/plugins_registry/`. Any drivers (CSI or device plugin) that were using the old path must be updated to work with this version. ([#70494](https://github.com/kubernetes/kubernetes/pull/70494), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) +* When the BoundServiceAccountTokenVolumes Alpha feature is enabled, ServiceAccount volumes now use a projected volume source and their names have the prefix "kube-api-access". ([#69848](https://github.com/kubernetes/kubernetes/pull/69848), [@mikedanese](https://github.com/mikedanese)) + + + # v1.13.0-beta.1 [Documentation](https://docs.k8s.io) @@ -101,7 +945,6 @@ filename | sha512 hash * StatefulSet is supported in `kubectl autoscale` command ([#71103](https://github.com/kubernetes/kubernetes/pull/71103), [@Pingan2017](https://github.com/Pingan2017)) * Report kube-scheduler unhealthy if leader election is deadlocked. 
([#71085](https://github.com/kubernetes/kubernetes/pull/71085), [@bsalamat](https://github.com/bsalamat)) * apiserver: fixes handling and logging of panics in REST handlers ([#71076](https://github.com/kubernetes/kubernetes/pull/71076), [@liggitt](https://github.com/liggitt)) -* kube-apiserver now serves OpenAPI specs for registered CRDs with defined validation schemata. Kubectl will validate client-side using those. ([#67205](https://github.com/kubernetes/kubernetes/pull/67205), [@roycaihw](https://github.com/roycaihw)) * kubelets are no longer allowed to delete their own Node API object. Prior to 1.11, in rare circumstances related to cloudprovider node ID changes, kubelets would attempt to delete/recreate their Node object at startup. Kubelets older than 1.11 are not supported running against a v1.13+ API server. If an unsupported legacy kubelet encounters this situation, a cluster admin can remove the Node object: ([#71021](https://github.com/kubernetes/kubernetes/pull/71021), [@liggitt](https://github.com/liggitt)) * `kubectl delete node/` * or grant self-deletion permission explicitly: diff --git a/CHANGELOG-1.4.md b/CHANGELOG-1.4.md index ada428ec673..e5f0b08e712 100644 --- a/CHANGELOG-1.4.md +++ b/CHANGELOG-1.4.md @@ -753,18 +753,18 @@ binary | sha256 hash This is the first release tracked via the use of the [kubernetes/features](https://github.com/kubernetes/features) issues repo. Each Feature issue is owned by a Special Interest Group from [kubernetes/community](https://github.com/kubernetes/community) - **API Machinery** - - [alpha] Generate audit logs for every request user performs against secured API server endpoint. 
([docs](http://kubernetes.io/docs/admin/audit/)) ([kubernetes/features#22](https://github.com/kubernetes/features/issues/22)) - - [beta] `kube-apiserver` now publishes a swagger 2.0 spec in addition to a swagger 1.2 spec ([kubernetes/features#53](https://github.com/kubernetes/features/issues/53)) + - [alpha] Generate audit logs for every request user performs against secured API server endpoint. ([docs](http://kubernetes.io/docs/admin/audit/)) ([kubernetes/features#22](https://github.com/kubernetes/enhancements/issues/22)) + - [beta] `kube-apiserver` now publishes a swagger 2.0 spec in addition to a swagger 1.2 spec ([kubernetes/features#53](https://github.com/kubernetes/enhancements/issues/53)) - [beta] Server-side garbage collection is enabled by default. See [user-guide](http://kubernetes.io/docs/user-guide/garbage-collection/) - **Apps** - - [alpha] Introducing 'ScheduledJobs', which allow running time based Jobs, namely once at a specified time or repeatedly at specified point in time. ([docs](http://kubernetes.io/docs/user-guide/scheduled-jobs/)) ([kubernetes/features#19](https://github.com/kubernetes/features/issues/19)) + - [alpha] Introducing 'ScheduledJobs', which allow running time based Jobs, namely once at a specified time or repeatedly at specified point in time. 
([docs](http://kubernetes.io/docs/user-guide/scheduled-jobs/)) ([kubernetes/features#19](https://github.com/kubernetes/enhancements/issues/19)) - **Auth** - - [alpha] Container Image Policy allows an access controller to determine whether a pod may be scheduled based on a policy ([docs](http://kubernetes.io/docs/admin/admission-controllers/#imagepolicywebhook)) ([kubernetes/features#59](https://github.com/kubernetes/features/issues/59)) - - [alpha] Access Review APIs expose authorization engine to external inquiries for delegation, inspection, and debugging ([docs](http://kubernetes.io/docs/admin/authorization/)) ([kubernetes/features#37](https://github.com/kubernetes/features/issues/37)) + - [alpha] Container Image Policy allows an access controller to determine whether a pod may be scheduled based on a policy ([docs](http://kubernetes.io/docs/admin/admission-controllers/#imagepolicywebhook)) ([kubernetes/features#59](https://github.com/kubernetes/enhancements/issues/59)) + - [alpha] Access Review APIs expose authorization engine to external inquiries for delegation, inspection, and debugging ([docs](http://kubernetes.io/docs/admin/authorization/)) ([kubernetes/features#37](https://github.com/kubernetes/enhancements/issues/37)) - **Cluster Lifecycle** - - [alpha] Ensure critical cluster infrastructure pods (Heapster, DNS, etc.) can schedule by evicting regular pods when necessary to make the critical pods schedule. ([docs](http://kubernetes.io/docs/admin/rescheduler/#guaranteed-scheduling-of-critical-add-on-pods)) ([kubernetes/features#62](https://github.com/kubernetes/features/issues/62)) - - [alpha] Simplifies bootstrapping of TLS secured communication between the API server and kubelet. ([docs](http://kubernetes.io/docs/admin/master-node-communication/#kubelet-tls-bootstrap)) ([kubernetes/features#43](https://github.com/kubernetes/features/issues/43)) - - [alpha] The `kubeadm` tool makes it much easier to bootstrap Kubernetes. 
([docs](http://kubernetes.io/docs/getting-started-guides/kubeadm/)) ([kubernetes/features#11](https://github.com/kubernetes/features/issues/11)) + - [alpha] Ensure critical cluster infrastructure pods (Heapster, DNS, etc.) can schedule by evicting regular pods when necessary to make the critical pods schedule. ([docs](http://kubernetes.io/docs/admin/rescheduler/#guaranteed-scheduling-of-critical-add-on-pods)) ([kubernetes/features#62](https://github.com/kubernetes/enhancements/issues/62)) + - [alpha] Simplifies bootstrapping of TLS secured communication between the API server and kubelet. ([docs](http://kubernetes.io/docs/admin/master-node-communication/#kubelet-tls-bootstrap)) ([kubernetes/features#43](https://github.com/kubernetes/enhancements/issues/43)) + - [alpha] The `kubeadm` tool makes it much easier to bootstrap Kubernetes. ([docs](http://kubernetes.io/docs/getting-started-guides/kubeadm/)) ([kubernetes/features#11](https://github.com/kubernetes/enhancements/issues/11)) - **Federation** - [alpha] Creating a `Federated Ingress` is as simple as submitting an `Ingress` creation request to the Federation API Server. The @@ -774,41 +774,41 @@ This is the first release tracked via the use of the [kubernetes/features](https GCE L7 LoadBalancer is the first supported implementation, and is available in this release. ([docs](http://kubernetes.io/docs/user-guide/federation/federated-ingress.md)) - ([kubernetes/features#82](https://github.com/kubernetes/features/issues/82)) + ([kubernetes/features#82](https://github.com/kubernetes/enhancements/issues/82)) - [beta] `Federated Replica Sets` create and maintain matching `Replica Set`s in some or all clusters in a federation, with the desired replica count distributed equally or according to specified per-cluster weights. 
([docs](http://kubernetes.io/docs/user-guide/federation/federated-replicasets.md)) - ([kubernetes/features#46](https://github.com/kubernetes/features/issues/46)) + ([kubernetes/features#46](https://github.com/kubernetes/enhancements/issues/46)) - [beta] `Federated Secrets` are created and kept consistent across all clusters in a federation. ([docs](http://kubernetes.io/docs/user-guide/federation/federated-secrets.md)) - ([kubernetes/features#68](https://github.com/kubernetes/features/issues/68)) + ([kubernetes/features#68](https://github.com/kubernetes/enhancements/issues/68)) - [beta] Federation API server gained support for events and many federation controllers now report important events. ([docs](http://kubernetes.io/docs/user-guide/federation/events)) - ([kubernetes/features#70](https://github.com/kubernetes/features/issues/70)) + ([kubernetes/features#70](https://github.com/kubernetes/enhancements/issues/70)) - [alpha] Creating a `Federated Namespace` causes matching - `Namespace`s to be created and maintained in all the clusters registered with that federation. ([docs](http://kubernetes.io/docs/user-guide/federation/federated-namespaces.md)) ([kubernetes/features#69](https://github.com/kubernetes/features/issues/69)) - - [alpha] ingress has alpha support for a single master multi zone cluster ([docs](http://kubernetes.io/docs/user-guide/ingress.md#failing-across-availability-zones)) ([kubernetes/features#52](https://github.com/kubernetes/features/issues/52)) + `Namespace`s to be created and maintained in all the clusters registered with that federation. 
([docs](http://kubernetes.io/docs/user-guide/federation/federated-namespaces.md)) ([kubernetes/features#69](https://github.com/kubernetes/enhancements/issues/69)) + - [alpha] ingress has alpha support for a single master multi zone cluster ([docs](http://kubernetes.io/docs/user-guide/ingress.md#failing-across-availability-zones)) ([kubernetes/features#52](https://github.com/kubernetes/enhancements/issues/52)) - **Network** - - [alpha] Service LB now has alpha support for preserving client source IP ([docs](http://kubernetes.io/docs/user-guide/load-balancer/)) ([kubernetes/features#27](https://github.com/kubernetes/features/issues/27)) + - [alpha] Service LB now has alpha support for preserving client source IP ([docs](http://kubernetes.io/docs/user-guide/load-balancer/)) ([kubernetes/features#27](https://github.com/kubernetes/enhancements/issues/27)) - **Node** - - [alpha] Publish node performance dashboard at http://node-perf-dash.k8s.io/#/builds ([docs](https://github.com/kubernetes/contrib/blob/master/node-perf-dash/README.md)) ([kubernetes/features#83](https://github.com/kubernetes/features/issues/83)) - - [alpha] Pods now have alpha support for setting whitelisted, safe sysctls. Unsafe sysctls can be whitelisted on the kubelet. 
([docs](http://kubernetes.io/docs/admin/sysctls/)) ([kubernetes/features#34](https://github.com/kubernetes/features/issues/34)) - - [beta] AppArmor profiles can be specified & applied to pod containers ([docs](http://kubernetes.io/docs/admin/apparmor/)) ([kubernetes/features#24](https://github.com/kubernetes/features/issues/24)) - - [beta] Cluster policy to control access and defaults of security related features ([docs](http://kubernetes.io/docs/user-guide/pod-security-policy/)) ([kubernetes/features#5](https://github.com/kubernetes/features/issues/5)) - - [stable] kubelet is able to evict pods when it observes disk pressure ([docs](http://kubernetes.io/docs/admin/out-of-resource/)) ([kubernetes/features#39](https://github.com/kubernetes/features/issues/39)) - - [stable] Automated docker validation results posted to https://k8s-testgrid.appspot.com/docker [kubernetes/features#57](https://github.com/kubernetes/features/issues/57) + - [alpha] Publish node performance dashboard at http://node-perf-dash.k8s.io/#/builds ([docs](https://github.com/kubernetes/contrib/blob/master/node-perf-dash/README.md)) ([kubernetes/features#83](https://github.com/kubernetes/enhancements/issues/83)) + - [alpha] Pods now have alpha support for setting whitelisted, safe sysctls. Unsafe sysctls can be whitelisted on the kubelet. 
([docs](http://kubernetes.io/docs/admin/sysctls/)) ([kubernetes/features#34](https://github.com/kubernetes/enhancements/issues/34)) + - [beta] AppArmor profiles can be specified & applied to pod containers ([docs](http://kubernetes.io/docs/admin/apparmor/)) ([kubernetes/features#24](https://github.com/kubernetes/enhancements/issues/24)) + - [beta] Cluster policy to control access and defaults of security related features ([docs](http://kubernetes.io/docs/user-guide/pod-security-policy/)) ([kubernetes/features#5](https://github.com/kubernetes/enhancements/issues/5)) + - [stable] kubelet is able to evict pods when it observes disk pressure ([docs](http://kubernetes.io/docs/admin/out-of-resource/)) ([kubernetes/features#39](https://github.com/kubernetes/enhancements/issues/39)) + - [stable] Automated docker validation results posted to https://k8s-testgrid.appspot.com/docker [kubernetes/features#57](https://github.com/kubernetes/enhancements/issues/57) - **Scheduling** - - [alpha] Allows pods to require or prohibit (or prefer or prefer not) co-scheduling on the same node (or zone or other topology domain) as another set of pods. ([docs](http://kubernetes.io/docs/user-guide/node-selection/) ([kubernetes/features#51](https://github.com/kubernetes/features/issues/51)) + - [alpha] Allows pods to require or prohibit (or prefer or prefer not) co-scheduling on the same node (or zone or other topology domain) as another set of pods. ([docs](http://kubernetes.io/docs/user-guide/node-selection/) ([kubernetes/features#51](https://github.com/kubernetes/enhancements/issues/51)) - **Storage** - - [beta] Persistent Volume provisioning now supports multiple provisioners using StorageClass configuration. 
([docs](http://kubernetes.io/docs/user-guide/persistent-volumes/)) ([kubernetes/features#36](https://github.com/kubernetes/features/issues/36)) - - [stable] New volume plugin for the Quobyte Distributed File System ([docs](http://kubernetes.io/docs/user-guide/volumes/#quobyte)) ([kubernetes/features#80](https://github.com/kubernetes/features/issues/80)) - - [stable] New volume plugin for Azure Data Disk ([docs](http://kubernetes.io/docs/user-guide/volumes/#azurediskvolume)) ([kubernetes/features#79](https://github.com/kubernetes/features/issues/79)) + - [beta] Persistent Volume provisioning now supports multiple provisioners using StorageClass configuration. ([docs](http://kubernetes.io/docs/user-guide/persistent-volumes/)) ([kubernetes/features#36](https://github.com/kubernetes/enhancements/issues/36)) + - [stable] New volume plugin for the Quobyte Distributed File System ([docs](http://kubernetes.io/docs/user-guide/volumes/#quobyte)) ([kubernetes/features#80](https://github.com/kubernetes/enhancements/issues/80)) + - [stable] New volume plugin for Azure Data Disk ([docs](http://kubernetes.io/docs/user-guide/volumes/#azurediskvolume)) ([kubernetes/features#79](https://github.com/kubernetes/enhancements/issues/79)) - **UI** - [stable] Kubernetes Dashboard UI - a great looking Kubernetes Dashboard UI with 90% CLI parity for at-a-glance management. [docs](https://github.com/kubernetes/dashboard) - - [stable] `kubectl` no longer applies defaults before sending objects to the server in create and update requests, allowing the server to apply the defaults. ([kubernetes/features#55](https://github.com/kubernetes/features/issues/55)) + - [stable] `kubectl` no longer applies defaults before sending objects to the server in create and update requests, allowing the server to apply the defaults. 
([kubernetes/features#55](https://github.com/kubernetes/enhancements/issues/55)) ## Known Issues diff --git a/CHANGELOG-1.5.md b/CHANGELOG-1.5.md index 99fb9576dcd..30b72d095aa 100644 --- a/CHANGELOG-1.5.md +++ b/CHANGELOG-1.5.md @@ -624,45 +624,45 @@ filename | sha256 hash Features for this release were tracked via the use of the [kubernetes/features](https://github.com/kubernetes/features) issues repo. Each Feature issue is owned by a Special Interest Group from [kubernetes/community](https://github.com/kubernetes/community) - **API Machinery** - - [beta] `kube-apiserver` support for the OpenAPI spec is moving from alpha to beta. The first [non-go client](https://github.com/kubernetes-incubator/client-python) is based on it ([kubernetes/features#53](https://github.com/kubernetes/features/issues/53)) + - [beta] `kube-apiserver` support for the OpenAPI spec is moving from alpha to beta. The first [non-go client](https://github.com/kubernetes-incubator/client-python) is based on it ([kubernetes/features#53](https://github.com/kubernetes/enhancements/issues/53)) - **Apps** - - [stable] When replica sets cannot create pods, they will now report detail via the API about the underlying reason ([kubernetes/features#120](https://github.com/kubernetes/features/issues/120)) - - [stable] `kubectl apply` is now able to delete resources you no longer need with `--prune` ([kubernetes/features#128](https://github.com/kubernetes/features/issues/128)) - - [beta] Deployments that cannot make progress in rolling out the newest version will now indicate via the API they are blocked ([docs](http://kubernetes.io/docs/user-guide/deployments/#failed-deployment)) ([kubernetes/features#122](https://github.com/kubernetes/features/issues/122)) - - [beta] StatefulSets allow workloads that require persistent identity or per-instance storage to be created and managed on Kubernetes. 
([docs](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/)) ([kubernetes/features#137](https://github.com/kubernetes/features/issues/137)) - - [beta] In order to preserve safety guarantees the cluster no longer force deletes pods on un-responsive nodes and users are now warned if they try to force delete pods via the CLI. ([docs](http://kubernetes.io/docs/tasks/manage-stateful-set/scale-stateful-set/)) ([kubernetes/features#119](https://github.com/kubernetes/features/issues/119)) + - [stable] When replica sets cannot create pods, they will now report detail via the API about the underlying reason ([kubernetes/features#120](https://github.com/kubernetes/enhancements/issues/120)) + - [stable] `kubectl apply` is now able to delete resources you no longer need with `--prune` ([kubernetes/features#128](https://github.com/kubernetes/enhancements/issues/128)) + - [beta] Deployments that cannot make progress in rolling out the newest version will now indicate via the API they are blocked ([docs](http://kubernetes.io/docs/user-guide/deployments/#failed-deployment)) ([kubernetes/features#122](https://github.com/kubernetes/enhancements/issues/122)) + - [beta] StatefulSets allow workloads that require persistent identity or per-instance storage to be created and managed on Kubernetes. ([docs](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/)) ([kubernetes/features#137](https://github.com/kubernetes/enhancements/issues/137)) + - [beta] In order to preserve safety guarantees the cluster no longer force deletes pods on un-responsive nodes and users are now warned if they try to force delete pods via the CLI. ([docs](http://kubernetes.io/docs/tasks/manage-stateful-set/scale-stateful-set/)) ([kubernetes/features#119](https://github.com/kubernetes/enhancements/issues/119)) - **Auth** - - [alpha] Further polishing of the Role-based access control alpha API including a default set of cluster roles. 
([docs](http://kubernetes.io/docs/admin/authorization/)) ([kubernetes/features#2](https://github.com/kubernetes/features/issues/2)) - - [beta] Added ability to authenticate/authorize access to the Kubelet API ([docs](http://kubernetes.io/docs/admin/kubelet-authentication-authorization/)) ([kubernetes/features#89](https://github.com/kubernetes/features/issues/89)) + - [alpha] Further polishing of the Role-based access control alpha API including a default set of cluster roles. ([docs](http://kubernetes.io/docs/admin/authorization/)) ([kubernetes/features#2](https://github.com/kubernetes/enhancements/issues/2)) + - [beta] Added ability to authenticate/authorize access to the Kubelet API ([docs](http://kubernetes.io/docs/admin/kubelet-authentication-authorization/)) ([kubernetes/features#89](https://github.com/kubernetes/enhancements/issues/89)) - **AWS** - - [stable] Roles should appear in kubectl get nodes ([kubernetes/features#113](https://github.com/kubernetes/features/issues/113)) + - [stable] Roles should appear in kubectl get nodes ([kubernetes/features#113](https://github.com/kubernetes/enhancements/issues/113)) - **Cluster Lifecycle** - - [alpha] Improved UX and usability for the kubeadm binary that makes it easy to get a new cluster running. ([docs](http://kubernetes.io/docs/getting-started-guides/kubeadm/)) ([changelog](https://github.com/kubernetes/kubeadm/blob/master/CHANGELOG.md)) ([kubernetes/features#11](https://github.com/kubernetes/features/issues/11)) + - [alpha] Improved UX and usability for the kubeadm binary that makes it easy to get a new cluster running. ([docs](http://kubernetes.io/docs/getting-started-guides/kubeadm/)) ([changelog](https://github.com/kubernetes/kubeadm/blob/master/CHANGELOG.md)) ([kubernetes/features#11](https://github.com/kubernetes/enhancements/issues/11)) - **Cluster Ops** - - [alpha] Added ability to create/remove clusters w/highly available (replicated) masters on GCE using kube-up/kube-down scripts. 
([docs](http://kubernetes.io/docs/admin/ha-master-gce/)) ([kubernetes/features#48](https://github.com/kubernetes/features/issues/48)) + - [alpha] Added ability to create/remove clusters w/highly available (replicated) masters on GCE using kube-up/kube-down scripts. ([docs](http://kubernetes.io/docs/admin/ha-master-gce/)) ([kubernetes/features#48](https://github.com/kubernetes/enhancements/issues/48)) - **Federation** - - [alpha] Support for ConfigMaps in federation. ([docs](http://kubernetes.io/docs/user-guide/federation/configmap/)) ([kubernetes/features#105](https://github.com/kubernetes/features/issues/105)) - - [alpha] Alpha level support for DaemonSets in federation. ([docs](http://kubernetes.io/docs/user-guide/federation/daemonsets/)) ([kubernetes/features#101](https://github.com/kubernetes/features/issues/101)) - - [alpha] Alpha level support for Deployments in federation. ([docs](http://kubernetes.io/docs/user-guide/federation/deployment/)) ([kubernetes/features#100](https://github.com/kubernetes/features/issues/100)) - - [alpha] Cluster federation: Added support for DeleteOptions.OrphanDependents for federation resources. ([docs](http://kubernetes.io/docs/user-guide/federation/#cascading-deletion)) ([kubernetes/features#99](https://github.com/kubernetes/features/issues/99)) - - [alpha] Introducing `kubefed`, a new command line tool to simplify federation control plane. ([docs](http://kubernetes.io/docs/admin/federation/kubefed/)) ([kubernetes/features#97](https://github.com/kubernetes/features/issues/97)) + - [alpha] Support for ConfigMaps in federation. ([docs](http://kubernetes.io/docs/user-guide/federation/configmap/)) ([kubernetes/features#105](https://github.com/kubernetes/enhancements/issues/105)) + - [alpha] Alpha level support for DaemonSets in federation. 
([docs](http://kubernetes.io/docs/user-guide/federation/daemonsets/)) ([kubernetes/features#101](https://github.com/kubernetes/enhancements/issues/101)) + - [alpha] Alpha level support for Deployments in federation. ([docs](http://kubernetes.io/docs/user-guide/federation/deployment/)) ([kubernetes/features#100](https://github.com/kubernetes/enhancements/issues/100)) + - [alpha] Cluster federation: Added support for DeleteOptions.OrphanDependents for federation resources. ([docs](http://kubernetes.io/docs/user-guide/federation/#cascading-deletion)) ([kubernetes/features#99](https://github.com/kubernetes/enhancements/issues/99)) + - [alpha] Introducing `kubefed`, a new command line tool to simplify federation control plane. ([docs](http://kubernetes.io/docs/admin/federation/kubefed/)) ([kubernetes/features#97](https://github.com/kubernetes/enhancements/issues/97)) - **Network** - - [stable] Services can reference another service by DNS name, rather than being hosted in pods ([kubernetes/features#33](https://github.com/kubernetes/features/issues/33)) - - [beta] Opt in source ip preservation for Services with Type NodePort or LoadBalancer ([docs](http://kubernetes.io/docs/tutorials/services/source-ip/)) ([kubernetes/features#27](https://github.com/kubernetes/features/issues/27)) + - [stable] Services can reference another service by DNS name, rather than being hosted in pods ([kubernetes/features#33](https://github.com/kubernetes/enhancements/issues/33)) + - [beta] Opt in source ip preservation for Services with Type NodePort or LoadBalancer ([docs](http://kubernetes.io/docs/tutorials/services/source-ip/)) ([kubernetes/features#27](https://github.com/kubernetes/enhancements/issues/27)) - [stable] Enable DNS Horizontal Autoscaling with beta ConfigMap parameters support ([docs](http://kubernetes.io/docs/tasks/administer-cluster/dns-horizontal-autoscaling/)) - **Node** - - [alpha] Added ability to preserve access to host userns when userns remapping is enabled in 
container runtime ([kubernetes/features#127](https://github.com/kubernetes/features/issues/127)) - - [alpha] Introducing the v1alpha1 CRI API to allow pluggable container runtimes; an experimental docker-CRI integration is ready for testing and feedback. ([docs](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md)) ([kubernetes/features#54](https://github.com/kubernetes/features/issues/54)) - - [alpha] Kubelet launches container in a per pod cgroup hierarchy based on quality of service tier ([kubernetes/features#126](https://github.com/kubernetes/features/issues/126)) - - [beta] Kubelet integrates with memcg notification API to detect when a hard eviction threshold is crossed ([kubernetes/features#125](https://github.com/kubernetes/features/issues/125)) - - [beta] Introducing the beta version containerized node conformance test gcr.io/google_containers/node-test:0.2 for users to verify node setup. ([docs](http://kubernetes.io/docs/admin/node-conformance/)) ([kubernetes/features#84](https://github.com/kubernetes/features/issues/84)) + - [alpha] Added ability to preserve access to host userns when userns remapping is enabled in container runtime ([kubernetes/features#127](https://github.com/kubernetes/enhancements/issues/127)) + - [alpha] Introducing the v1alpha1 CRI API to allow pluggable container runtimes; an experimental docker-CRI integration is ready for testing and feedback. 
([docs](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md)) ([kubernetes/features#54](https://github.com/kubernetes/enhancements/issues/54)) + - [alpha] Kubelet launches container in a per pod cgroup hierarchy based on quality of service tier ([kubernetes/features#126](https://github.com/kubernetes/enhancements/issues/126)) + - [beta] Kubelet integrates with memcg notification API to detect when a hard eviction threshold is crossed ([kubernetes/features#125](https://github.com/kubernetes/enhancements/issues/125)) + - [beta] Introducing the beta version containerized node conformance test gcr.io/google_containers/node-test:0.2 for users to verify node setup. ([docs](http://kubernetes.io/docs/admin/node-conformance/)) ([kubernetes/features#84](https://github.com/kubernetes/enhancements/issues/84)) - **Scheduling** - - [alpha] Added support for accounting opaque integer resources. ([docs](http://kubernetes.io/docs/user-guide/compute-resources/#opaque-integer-resources-alpha-feature)) ([kubernetes/features#76](https://github.com/kubernetes/features/issues/76)) - - [beta] PodDisruptionBudget has been promoted to beta, can be used to safely drain nodes while respecting application SLO's ([docs](http://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/)) ([kubernetes/features#85](https://github.com/kubernetes/features/issues/85)) + - [alpha] Added support for accounting opaque integer resources. 
([docs](http://kubernetes.io/docs/user-guide/compute-resources/#opaque-integer-resources-alpha-feature)) ([kubernetes/features#76](https://github.com/kubernetes/enhancements/issues/76)) + - [beta] PodDisruptionBudget has been promoted to beta, can be used to safely drain nodes while respecting application SLO's ([docs](http://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/)) ([kubernetes/features#85](https://github.com/kubernetes/enhancements/issues/85)) - **UI** - - [stable] Dashboard UI now shows all user facing objects and their resource usage. ([docs](http://kubernetes.io/docs/user-guide/ui/)) ([kubernetes/features#136](https://github.com/kubernetes/features/issues/136)) + - [stable] Dashboard UI now shows all user facing objects and their resource usage. ([docs](http://kubernetes.io/docs/user-guide/ui/)) ([kubernetes/features#136](https://github.com/kubernetes/enhancements/issues/136)) - **Windows** - - [alpha] Added support for Windows Server 2016 nodes and scheduling Windows Server Containers ([docs](http://kubernetes.io/docs/getting-started-guides/windows/)) ([kubernetes/features#116](https://github.com/kubernetes/features/issues/116)) + - [alpha] Added support for Windows Server 2016 nodes and scheduling Windows Server Containers ([docs](http://kubernetes.io/docs/getting-started-guides/windows/)) ([kubernetes/features#116](https://github.com/kubernetes/enhancements/issues/116)) ## Known Issues diff --git a/CHANGELOG-1.6.md b/CHANGELOG-1.6.md index 7080309563b..a230108938c 100644 --- a/CHANGELOG-1.6.md +++ b/CHANGELOG-1.6.md @@ -1349,7 +1349,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( * **[beta]** `kubefed` has graduated to beta: supports hosting federation on on-prem clusters, automatically configures `kube-dns` in joining clusters and allows passing arguments to federation components. 
### Internal Storage Layer -* **[stable]** The internal storage layer for kubernetes cluster state has been updated to use etcd v3 by default. Existing clusters will have to plan for a data migration window. ([docs](https://kubernetes.io/docs/tasks/administer-cluster/upgrade-1-6/))([kubernetes/features#44](https://github.com/kubernetes/features/issues/44)) +* **[stable]** The internal storage layer for kubernetes cluster state has been updated to use etcd v3 by default. Existing clusters will have to plan for a data migration window. ([docs](https://kubernetes.io/docs/tasks/administer-cluster/upgrade-1-6/))([kubernetes/features#44](https://github.com/kubernetes/enhancements/issues/44)) ### kubeadm * **[beta]** Introduces an API for clients to request TLS certificates from the API server. See the [tutorial](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster). diff --git a/CHANGELOG-1.7.md b/CHANGELOG-1.7.md index 0c713dd36df..faee0bbe8a6 100644 --- a/CHANGELOG-1.7.md +++ b/CHANGELOG-1.7.md @@ -1535,7 +1535,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( * [beta] DaemonSet supports history and rollback. See [Performing a Rollback on a DaemonSet](https://kubernetes.io/docs/tasks/manage-daemon/rollback-daemon-set/). #### Deployments -* [beta] Deployments uses a hashing collision avoidance mechanism that ensures new rollouts will not block on hashing collisions anymore. ([kubernetes/features#287](https://github.com/kubernetes/features/issues/287)) +* [beta] Deployments uses a hashing collision avoidance mechanism that ensures new rollouts will not block on hashing collisions anymore. ([kubernetes/features#287](https://github.com/kubernetes/enhancements/issues/287)) #### PodDisruptionBudget * [beta] PodDisruptionBudget has a new field MaxUnavailable, which allows users to specify the maximum number of disruptions that can be tolerated during eviction. 
For more information, see [Pod Disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) and [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/). @@ -1569,7 +1569,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( ### **Cluster Lifecycle** #### kubeadm -* [alpha] Manual [upgrades for kubeadm from v1.6 to v1.7](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm-upgrade-1-7/). Automated upgrades ([kubernetes/features#296](https://github.com/kubernetes/features/issues/296)) are targeted for v1.8. +* [alpha] Manual [upgrades for kubeadm from v1.6 to v1.7](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm-upgrade-1-7/). Automated upgrades ([kubernetes/features#296](https://github.com/kubernetes/enhancements/issues/296)) are targeted for v1.8. #### Cloud Provider Support * [alpha] Improved support for out-of-tree and out-of-process cloud providers, a.k.a pluggable cloud providers. See [Build and Run cloud-controller-manager](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller) documentation. 
@@ -1585,7 +1585,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( ### **Instrumentation** #### Core Metrics API -* [alpha] Introduces a lightweight monitoring component for serving the core resource metrics API used by the Horizontal Pod Autoscaler and other components ([kubernetes/features#271](https://github.com/kubernetes/features/issues/271)) +* [alpha] Introduces a lightweight monitoring component for serving the core resource metrics API used by the Horizontal Pod Autoscaler and other components ([kubernetes/features#271](https://github.com/kubernetes/enhancements/issues/271)) ### **Internationalization** @@ -1707,11 +1707,11 @@ Features for this release were tracked via the use of the [kubernetes/features]( ### **Node Components** #### Container Runtime Interface -* [alpha] CRI validation testing, which provides a test framework and a suite of tests to validate that the CRI server implementation meets all the requirements. This allows the CRI runtime developers to verify that their runtime conforms to CRI, without needing to set up Kubernetes components or run Kubernetes end-to-end tests. ([docs](https://github.com/kubernetes/community/blob/master/contributors/devel/cri-validation.md) and [release notes](https://github.com/kubernetes-incubator/cri-tools/releases/tag/v0.1)) ([kubernetes/features#292](https://github.com/kubernetes/features/issues/292)) +* [alpha] CRI validation testing, which provides a test framework and a suite of tests to validate that the CRI server implementation meets all the requirements. This allows the CRI runtime developers to verify that their runtime conforms to CRI, without needing to set up Kubernetes components or run Kubernetes end-to-end tests. 
([docs](https://github.com/kubernetes/community/blob/master/contributors/devel/cri-validation.md) and [release notes](https://github.com/kubernetes-incubator/cri-tools/releases/tag/v0.1)) ([kubernetes/features#292](https://github.com/kubernetes/enhancements/issues/292)) -* [alpha] Adds support of container metrics in CRI ([docs PR](https://github.com/kubernetes/community/pull/742)) ([kubernetes/features#290](https://github.com/kubernetes/features/issues/290)) +* [alpha] Adds support of container metrics in CRI ([docs PR](https://github.com/kubernetes/community/pull/742)) ([kubernetes/features#290](https://github.com/kubernetes/enhancements/issues/290)) -* [alpha] Integration with [containerd] (https://github.com/containerd/containerd) , which supports basic pod lifecycle and image management. ([docs](https://github.com/kubernetes-incubator/cri-containerd/blob/master/README.md) and [release notes](https://github.com/kubernetes-incubator/cri-containerd/releases/tag/v0.1.0)) ([kubernetes/features#286](https://github.com/kubernetes/features/issues/286)) +* [alpha] Integration with [containerd] (https://github.com/containerd/containerd) , which supports basic pod lifecycle and image management. ([docs](https://github.com/kubernetes-incubator/cri-containerd/blob/master/README.md) and [release notes](https://github.com/kubernetes-incubator/cri-containerd/releases/tag/v0.1.0)) ([kubernetes/features#286](https://github.com/kubernetes/enhancements/issues/286)) * [GA] The Docker-CRI implementation is GA. The legacy, non-CRI Docker integration has been completely removed. 
@@ -1723,7 +1723,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( ### **Scheduling** #### Scheduler Extender -* [alpha] Support for delegating pod binding to a scheduler extender ([kubernetes/features#270](https://github.com/kubernetes/features/issues/270)) +* [alpha] Support for delegating pod binding to a scheduler extender ([kubernetes/features#270](https://github.com/kubernetes/enhancements/issues/270)) ### **Storage** #### Local Storage @@ -1807,7 +1807,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( * PodDisruptionBudget now uses ControllerRef to decide which controller owns a given Pod, so it doesn't get confused by controllers with overlapping selectors. ([#45003](https://github.com/kubernetes/kubernetes/pull/45003), [@krmayankk](https://github.com/krmayankk)) -* Deployments are updated to use (1) a more stable hashing algorithm (fnv) than the previous one (adler) and (2) a hashing collision avoidance mechanism that will ensure new rollouts will not block on hashing collisions anymore. ([#44774](https://github.com/kubernetes/kubernetes/pull/44774), [@kargakis](https://github.com/kargakis))([kubernetes/features#287](https://github.com/kubernetes/features/issues/287)) +* Deployments are updated to use (1) a more stable hashing algorithm (fnv) than the previous one (adler) and (2) a hashing collision avoidance mechanism that will ensure new rollouts will not block on hashing collisions anymore. ([#44774](https://github.com/kubernetes/kubernetes/pull/44774), [@kargakis](https://github.com/kargakis))([kubernetes/features#287](https://github.com/kubernetes/enhancements/issues/287)) * Deployments and DaemonSets rollouts are considered complete when all of the desired replicas are updated and available. This change affects `kubectl rollout status` and Deployment condition. 
([#44672](https://github.com/kubernetes/kubernetes/pull/44672), [@kargakis](https://github.com/kargakis)) diff --git a/CHANGELOG-1.8.md b/CHANGELOG-1.8.md index 467302a85f1..42848a7d8a2 100644 --- a/CHANGELOG-1.8.md +++ b/CHANGELOG-1.8.md @@ -1432,7 +1432,7 @@ Service Level Indicators (SLIs) and Service Level Objectives (SLOs) for the syst Here's the release [scalability validation report]. [SIG Scalability]: https://github.com/kubernetes/community/tree/master/sig-scalability -[scalability validation report]: https://github.com/kubernetes/features/tree/master/release-1.8/scalability_validation_report.md +[scalability validation report]: https://github.com/kubernetes/enhancements/tree/master/release-1.8/scalability_validation_report.md ### SIG Scheduling @@ -1611,7 +1611,7 @@ Kubernetes 1.8 adds the apps/v1beta2 group and version, which now consists of th DaemonSet, Deployment, ReplicaSet and StatefulSet kinds. This group and version are part of the Kubernetes Workloads API. We plan to move them to v1 in an upcoming release, so you might want to plan your migration accordingly. -For more information, see [the issue that describes this work in detail](https://github.com/kubernetes/features/issues/353) +For more information, see [the issue that describes this work in detail](https://github.com/kubernetes/enhancements/issues/353) #### API Object Additions and Migrations @@ -1862,7 +1862,7 @@ to the autoscalers in other clusters if required. #### Container Runtime Interface (CRI) -* [alpha] Add a CRI validation test suite and CRI command-line tools. ([#292](https://github.com/kubernetes/features/issues/292), [@feiskyer](https://github.com/feiskyer)) +* [alpha] Add a CRI validation test suite and CRI command-line tools. 
([#292](https://github.com/kubernetes/enhancements/issues/292), [@feiskyer](https://github.com/feiskyer)) * [stable] [cri-o](https://github.com/kubernetes-incubator/cri-o): CRI implementation for OCI-based runtimes [[@mrunalp](https://github.com/mrunalp)] @@ -1886,13 +1886,13 @@ to the autoscalers in other clusters if required. #### kubelet -* [alpha] Kubelet now supports alternative container-level CPU affinity policies by using the new CPU manager. ([#375](https://github.com/kubernetes/features/issues/375), [@sjenning](https://github.com/sjenning), [@ConnorDoyle](https://github.com/ConnorDoyle)) +* [alpha] Kubelet now supports alternative container-level CPU affinity policies by using the new CPU manager. ([#375](https://github.com/kubernetes/enhancements/issues/375), [@sjenning](https://github.com/sjenning), [@ConnorDoyle](https://github.com/ConnorDoyle)) -* [alpha] Applications may now request pre-allocated hugepages by using the new `hugepages` resource in the container resource requests. ([#275](https://github.com/kubernetes/features/issues/275), [@derekwaynecarr](https://github.com/derekwaynecarr)) +* [alpha] Applications may now request pre-allocated hugepages by using the new `hugepages` resource in the container resource requests. ([#275](https://github.com/kubernetes/enhancements/issues/275), [@derekwaynecarr](https://github.com/derekwaynecarr)) -* [alpha] Add support for dynamic Kubelet configuration. ([#281](https://github.com/kubernetes/features/issues/281), [@mtaufen](https://github.com/mtaufen)) +* [alpha] Add support for dynamic Kubelet configuration. ([#281](https://github.com/kubernetes/enhancements/issues/281), [@mtaufen](https://github.com/mtaufen)) -* [alpha] Add the Hardware Device Plugins API. ([#368](https://github.com/kubernetes/features/issues/368), [[@jiayingz](https://github.com/jiayingz)], [[@RenaudWasTaken](https://github.com/RenaudWasTaken)]) +* [alpha] Add the Hardware Device Plugins API. 
([#368](https://github.com/kubernetes/enhancements/issues/368), [[@jiayingz](https://github.com/jiayingz)], [[@RenaudWasTaken](https://github.com/RenaudWasTaken)]) * [stable] Upgrade cAdvisor to v0.27.1 with the enhancement for node monitoring. [[@dashpole](https://github.com/dashpole)] @@ -1958,33 +1958,33 @@ to the autoscalers in other clusters if required. #### kubeadm -* [beta] A new `upgrade` subcommand allows you to automatically upgrade a self-hosted cluster created with kubeadm. ([#296](https://github.com/kubernetes/features/issues/296), [@luxas](https://github.com/luxas)) +* [beta] A new `upgrade` subcommand allows you to automatically upgrade a self-hosted cluster created with kubeadm. ([#296](https://github.com/kubernetes/enhancements/issues/296), [@luxas](https://github.com/luxas)) -* [alpha] An experimental self-hosted cluster can now easily be created with `kubeadm init`. Enable the feature by setting the SelfHosting feature gate to true: `--feature-gates=SelfHosting=true` ([#296](https://github.com/kubernetes/features/issues/296), [@luxas](https://github.com/luxas)) +* [alpha] An experimental self-hosted cluster can now easily be created with `kubeadm init`. Enable the feature by setting the SelfHosting feature gate to true: `--feature-gates=SelfHosting=true` ([#296](https://github.com/kubernetes/enhancements/issues/296), [@luxas](https://github.com/luxas)) * **NOTE:** Self-hosting will be the default way to host the control plane in the next release, v1.9 -* [alpha] A new `phase` subcommand supports performing only subtasks of the full `kubeadm init` flow. Combined with fine-grained configuration, kubeadm is now more easily consumable by higher-level provisioning tools like kops or GKE. ([#356](https://github.com/kubernetes/features/issues/356), [@luxas](https://github.com/luxas)) +* [alpha] A new `phase` subcommand supports performing only subtasks of the full `kubeadm init` flow. 
Combined with fine-grained configuration, kubeadm is now more easily consumable by higher-level provisioning tools like kops or GKE. ([#356](https://github.com/kubernetes/enhancements/issues/356), [@luxas](https://github.com/luxas)) * **NOTE:** This command is currently staged under `kubeadm alpha phase` and will be graduated to top level in a future release. #### kops -* [alpha] Added support for targeting bare metal (or non-cloudprovider) machines. ([#360](https://github.com/kubernetes/features/issues/360), [@justinsb](https://github.com/justinsb)). +* [alpha] Added support for targeting bare metal (or non-cloudprovider) machines. ([#360](https://github.com/kubernetes/enhancements/issues/360), [@justinsb](https://github.com/justinsb)). -* [alpha] kops now supports [running as a server](https://github.com/kubernetes/kops/blob/master/docs/api-server/README.md). ([#359](https://github.com/kubernetes/features/issues/359), [@justinsb](https://github.com/justinsb)) +* [alpha] kops now supports [running as a server](https://github.com/kubernetes/kops/blob/master/docs/api-server/README.md). ([#359](https://github.com/kubernetes/enhancements/issues/359), [@justinsb](https://github.com/justinsb)) -* [beta] GCE support is promoted from alpha to beta. ([#358](https://github.com/kubernetes/features/issues/358), [@justinsb](https://github.com/justinsb)). +* [beta] GCE support is promoted from alpha to beta. ([#358](https://github.com/kubernetes/enhancements/issues/358), [@justinsb](https://github.com/justinsb)). #### Cluster Discovery/Bootstrap -* [beta] The authentication and verification mechanism called Bootstrap Tokens is improved. Use Bootstrap Tokens to easily add new node identities to a cluster. ([#130](https://github.com/kubernetes/features/issues/130), [@luxas](https://github.com/luxas), [@jbeda](https://github.com/jbeda)). +* [beta] The authentication and verification mechanism called Bootstrap Tokens is improved. 
Use Bootstrap Tokens to easily add new node identities to a cluster. ([#130](https://github.com/kubernetes/enhancements/issues/130), [@luxas](https://github.com/luxas), [@jbeda](https://github.com/jbeda)). #### Multi-platform -* [alpha] The Conformance e2e test suite now passes on the arm, arm64, and ppc64le platforms. ([#288](https://github.com/kubernetes/features/issues/288), [@luxas](https://github.com/luxas), [@mkumatag](https://github.com/mkumatag), [@ixdy](https://github.com/ixdy)) +* [alpha] The Conformance e2e test suite now passes on the arm, arm64, and ppc64le platforms. ([#288](https://github.com/kubernetes/enhancements/issues/288), [@luxas](https://github.com/luxas), [@mkumatag](https://github.com/mkumatag), [@ixdy](https://github.com/ixdy)) #### Cloud Providers -* [alpha] Support is improved for the pluggable, out-of-tree and out-of-core cloud providers. ([#88](https://github.com/kubernetes/features/issues/88), [@wlan0](https://github.com/wlan0)) +* [alpha] Support is improved for the pluggable, out-of-tree and out-of-core cloud providers. ([#88](https://github.com/kubernetes/enhancements/issues/88), [@wlan0](https://github.com/wlan0)) ### Network diff --git a/CHANGELOG-1.9.md b/CHANGELOG-1.9.md index 1d428e90f11..54225248b01 100644 --- a/CHANGELOG-1.9.md +++ b/CHANGELOG-1.9.md @@ -605,7 +605,7 @@ filename | sha256 hash * Fixed a race condition in k8s.io/client-go/tools/cache.SharedInformer that could violate the sequential delivery guarantee and cause panics on shutdown. ([#59828](https://github.com/kubernetes/kubernetes/pull/59828), [@krousey](https://github.com/krousey)) * [fluentd-gcp addon] Update fluentd and event-exporter images to have the latest base image. 
([#61719](https://github.com/kubernetes/kubernetes/pull/61719), [@crassirostris](https://github.com/crassirostris)) * Support new NODE_OS_DISTRIBUTION 'custom' on GCE ([#61235](https://github.com/kubernetes/kubernetes/pull/61235), [@yguo0905](https://github.com/yguo0905)) -* Fixes a bug where character devices are not recongized by the kubelet ([#60440](https://github.com/kubernetes/kubernetes/pull/60440), [@andrewsykim](https://github.com/andrewsykim)) +* Fixes a bug where character devices are not recognized by the kubelet ([#60440](https://github.com/kubernetes/kubernetes/pull/60440), [@andrewsykim](https://github.com/andrewsykim)) * Fixes storage e2e test failures in GKE regional clusters. ([#61303](https://github.com/kubernetes/kubernetes/pull/61303), [@verult](https://github.com/verult)) * fix the error prone account creation method of blob disk ([#59739](https://github.com/kubernetes/kubernetes/pull/59739), [@andyzhangx](https://github.com/andyzhangx)) * The webhook admission controller in a custom apiserver now works off-the-shelf. 
([#60995](https://github.com/kubernetes/kubernetes/pull/60995), [@caesarxuchao](https://github.com/caesarxuchao)) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 7683bf490cc..7bb98cb5c81 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -2345,8 +2345,8 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Comment": "1.1.3-22-gf2b4162afba355", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Comment": "1.1.4", + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/jteeuwen/go-bindata", @@ -3422,10 +3422,6 @@ "ImportPath": "golang.org/x/crypto/ssh/terminal", "Rev": "de0752318171da717af4ce24d0a2e8626afaeb11" }, - { - "ImportPath": "golang.org/x/exp/inotify", - "Rev": "292a51b8d262487dab23a588950e8052d63d9113" - }, { "ImportPath": "golang.org/x/net/context", "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" @@ -4053,19 +4049,19 @@ }, { "ImportPath": "k8s.io/utils/clock", - "Rev": "66066c83e385e385ccc3c964b44fd7dcd413d0ed" + "Rev": "8e7ff06bf0e2d3289061230af203e430a15b6dcc" }, { "ImportPath": "k8s.io/utils/exec", - "Rev": "66066c83e385e385ccc3c964b44fd7dcd413d0ed" + "Rev": "8e7ff06bf0e2d3289061230af203e430a15b6dcc" }, { "ImportPath": "k8s.io/utils/exec/testing", - "Rev": "66066c83e385e385ccc3c964b44fd7dcd413d0ed" + "Rev": "8e7ff06bf0e2d3289061230af203e430a15b6dcc" }, { "ImportPath": "k8s.io/utils/pointer", - "Rev": "66066c83e385e385ccc3c964b44fd7dcd413d0ed" + "Rev": "8e7ff06bf0e2d3289061230af203e430a15b6dcc" }, { "ImportPath": "sigs.k8s.io/yaml", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index a46ce6e3f45..81c91077360 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -98293,41 +98293,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -================================================================================ -= vendor/golang.org/x/exp/inotify licensed under: = - -Copyright (c) 2009 The Go Authors. 
All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -= vendor/golang.org/x/exp/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707 -================================================================================ - - ================================================================================ = vendor/golang.org/x/net/context licensed under: = diff --git a/OWNERS b/OWNERS index 822c0893e46..f5aeed776f2 100644 --- a/OWNERS +++ b/OWNERS @@ -7,6 +7,7 @@ filters: - lavalamp - smarterclayton - thockin + - liggitt approvers: - bgrant0607 - brendandburns @@ -17,6 +18,7 @@ filters: - smarterclayton - thockin - wojtek-t + - liggitt # Bazel build infrastructure changes often touch files throughout the tree "\\.bzl$": diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 88959953bde..13fc127f1bd 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -108,6 +108,7 @@ aliases: - mbohlool - pweil- - tallclair + - krmayankk sig-auth-serviceaccounts-approvers: - deads2k @@ -119,6 +120,7 @@ aliases: - enj - liggitt - mikedanese + - WanLinghao sig-storage-reviewers: - saad-ali diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index ed880538214..6911a6fff13 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -92492,7 +92492,7 @@ "description": "VolumeError captures an error encountered during a volume operation.", "properties": { "message": { - "description": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "description": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", "type": "string" }, "time": { @@ -92870,7 +92870,7 @@ "description": "VolumeError captures an error encountered during a volume operation.", "properties": { "message": { - "description": "String detailing the error encountered during Attach or Detach operation. 
This string maybe logged, so it should not contain sensitive information.", + "description": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", "type": "string" }, "time": { diff --git a/api/swagger-spec/storage.k8s.io_v1.json b/api/swagger-spec/storage.k8s.io_v1.json index 23c9f051d75..d7c23c37ed1 100644 --- a/api/swagger-spec/storage.k8s.io_v1.json +++ b/api/swagger-spec/storage.k8s.io_v1.json @@ -2255,7 +2255,7 @@ }, "message": { "type": "string", - "description": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information." + "description": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information." } } }, diff --git a/api/swagger-spec/storage.k8s.io_v1beta1.json b/api/swagger-spec/storage.k8s.io_v1beta1.json index aad4df3ded6..c32958d5658 100644 --- a/api/swagger-spec/storage.k8s.io_v1beta1.json +++ b/api/swagger-spec/storage.k8s.io_v1beta1.json @@ -2093,7 +2093,7 @@ }, "message": { "type": "string", - "description": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information." + "description": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information." 
} } }, diff --git a/build/BUILD b/build/BUILD index 8c5b6164b6a..c5657f7d008 100644 --- a/build/BUILD +++ b/build/BUILD @@ -89,12 +89,6 @@ grep ^STABLE_BUILD_SCM_REVISION bazel-out/stable-status.txt \ stamp = 1, ) -genrule( - name = "cni_package_version", - outs = ["cni_version"], - cmd = "echo 0.5.1 >$@", -) - release_filegroup( name = "docker-artifacts", srcs = [":%s.tar" % binary for binary in DOCKERIZED_BINARIES.keys()] + diff --git a/build/build-image/cross/Dockerfile b/build/build-image/cross/Dockerfile index 8e4bc0f8843..8df50c95669 100644 --- a/build/build-image/cross/Dockerfile +++ b/build/build-image/cross/Dockerfile @@ -15,7 +15,7 @@ # This file creates a standard build environment for building cross # platform go binary for the architecture kubernetes cares about. -FROM golang:1.11.2 +FROM golang:1.11.3 ENV GOARM 7 ENV KUBE_DYNAMIC_CROSSPLATFORMS \ @@ -44,7 +44,7 @@ RUN apt-get update \ # Use dynamic cgo linking for architectures other than amd64 for the server platforms # To install crossbuild essential for other architectures add the following repository. 
RUN echo "deb http://archive.ubuntu.com/ubuntu xenial main universe" > /etc/apt/sources.list.d/cgocrosscompiling.list \ - && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 40976EAF437D05B5 3B4FE6ACC0B21F32 \ + && apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 40976EAF437D05B5 3B4FE6ACC0B21F32 \ && apt-get update \ && apt-get install -y build-essential \ && for platform in ${KUBE_DYNAMIC_CROSSPLATFORMS}; do apt-get install -y crossbuild-essential-${platform}; done \ diff --git a/build/build-image/cross/VERSION b/build/build-image/cross/VERSION index 19abf8ebbbf..153cef80928 100644 --- a/build/build-image/cross/VERSION +++ b/build/build-image/cross/VERSION @@ -1 +1 @@ -v1.11.2-1 +v1.11.3-1 diff --git a/build/debs/BUILD b/build/debs/BUILD index 4237e92739d..e1cbfeccebe 100644 --- a/build/debs/BUILD +++ b/build/debs/BUILD @@ -3,7 +3,7 @@ package(default_visibility = ["//visibility:public"]) load("@io_kubernetes_build//defs:deb.bzl", "k8s_deb", "deb_data") load("@io_kubernetes_build//defs:build.bzl", "release_filegroup") load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar") -load("//build:workspace.bzl", "CRI_TOOLS_VERSION") +load("//build:workspace.bzl", "CNI_VERSION", "CRI_TOOLS_VERSION") # We do not include kube-scheduler, kube-controller-manager, # kube-apiserver, and kube-proxy in this list even though we @@ -148,14 +148,15 @@ k8s_deb( k8s_deb( name = "kubelet", depends = [ - "iptables (>= 1.4.21)", - "kubernetes-cni (>= 0.5.1)", - "iproute2", - "socat", - "util-linux", - "mount", + "conntrack", "ebtables", "ethtool", + "iproute2", + "iptables (>= 1.4.21)", + "kubernetes-cni (>= %s)" % CNI_VERSION, + "mount", + "socat", + "util-linux", ], description = """Kubernetes Node Agent The node agent of Kubernetes, the container cluster manager @@ -168,7 +169,7 @@ k8s_deb( depends = [ "kubelet (>= 1.8.0)", "kubectl (>= 1.8.0)", - "kubernetes-cni (>= 0.5.1)", + "kubernetes-cni (>= %s)" % CNI_VERSION, "cri-tools (>= 1.11.0)", ], description = 
"""Kubernetes Cluster Bootstrapping Tool @@ -183,7 +184,7 @@ k8s_deb( description = """Kubernetes Packaging of CNI The Container Networking Interface tools for provisioning container networks. """, - version_file = "//build:cni_package_version", + version = CNI_VERSION, ) k8s_deb( diff --git a/build/lib/release.sh b/build/lib/release.sh index 98f6767d958..18e2ca65440 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -30,6 +30,7 @@ readonly RELEASE_IMAGES="${LOCAL_OUTPUT_ROOT}/release-images" KUBE_BUILD_HYPERKUBE=${KUBE_BUILD_HYPERKUBE:-y} KUBE_BUILD_CONFORMANCE=${KUBE_BUILD_CONFORMANCE:-y} +KUBE_BUILD_PULL_LATEST_IMAGES=${KUBE_BUILD_PULL_LATEST_IMAGES:-y} # Validate a ci version # @@ -372,7 +373,15 @@ EOF if [[ "${base_image}" =~ busybox ]]; then echo "COPY nsswitch.conf /etc/" >> "${docker_file_path}" fi - "${DOCKER[@]}" build --pull -q -t "${docker_image_tag}" "${docker_build_path}" >/dev/null + + # provide `--pull` argument to `docker build` if `KUBE_BUILD_PULL_LATEST_IMAGES` + # is set to y or Y; otherwise try to build the image without forcefully + # pulling the latest base image. 
+ local -a docker_build_opts=() + if [[ "${KUBE_BUILD_PULL_LATEST_IMAGES}" =~ [yY] ]]; then + docker_build_opts+=("--pull") + fi + "${DOCKER[@]}" build "${docker_build_opts[@]}" -q -t "${docker_image_tag}" "${docker_build_path}" >/dev/null "${DOCKER[@]}" save "${docker_image_tag}" > "${binary_dir}/${binary_name}.tar" echo "${docker_tag}" > "${binary_dir}/${binary_name}.docker_tag" rm -rf "${docker_build_path}" diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index cee89622008..b16e541c1a2 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -1,11 +1,11 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file") load("//build:workspace_mirror.bzl", "mirror") -load("//build:workspace.bzl", "CRI_TOOLS_VERSION") +load("//build:workspace.bzl", "CNI_VERSION", "CRI_TOOLS_VERSION") http_archive( name = "io_bazel_rules_go", - sha256 = "f87fa87475ea107b3c69196f39c82b7bbf58fe27c62a338684c20ca17d1d8613", - urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.16.2/rules_go-0.16.2.tar.gz"), + sha256 = "62ec3496a00445889a843062de9930c228b770218c735eca89c67949cd967c3f", + urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.16.4/rules_go-0.16.4.tar.gz"), ) http_archive( @@ -22,12 +22,12 @@ http_archive( urls = mirror("https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"), ) -ETCD_VERSION = "3.2.24" +ETCD_VERSION = "3.3.10" http_archive( name = "com_coreos_etcd", build_file = "@//third_party:etcd.BUILD", - sha256 = "947849dbcfa13927c81236fb76a7c01d587bbab42ab1e807184cd91b026ebed7", + sha256 = "1620a59150ec0a0124a65540e23891243feb2d9a628092fb1edcc23974724a45", strip_prefix = "etcd-v%s-linux-amd64" % ETCD_VERSION, urls = mirror("https://github.com/coreos/etcd/releases/download/v%s/etcd-v%s-linux-amd64.tar.gz" % (ETCD_VERSION, ETCD_VERSION)), ) @@ -49,7 +49,7 @@ load("@io_bazel_rules_docker//docker:docker.bzl", "docker_pull", "docker_reposit 
go_rules_dependencies() go_register_toolchains( - go_version = "1.11.2", + go_version = "1.11.3", ) docker_repositories() @@ -58,7 +58,7 @@ http_file( name = "kubernetes_cni", downloaded_file_path = "kubernetes_cni.tgz", sha256 = "f04339a21b8edf76d415e7f17b620e63b8f37a76b2f706671587ab6464411f2d", - urls = mirror("https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.6.0.tgz"), + urls = mirror("https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v%s.tgz" % CNI_VERSION), ) http_file( diff --git a/build/rpms/BUILD b/build/rpms/BUILD index 8a5017212b0..be011df488e 100644 --- a/build/rpms/BUILD +++ b/build/rpms/BUILD @@ -1,7 +1,7 @@ package(default_visibility = ["//visibility:public"]) load("@bazel_tools//tools/build_defs/pkg:rpm.bzl", "pkg_rpm") -load("//build:workspace.bzl", "CRI_TOOLS_VERSION") +load("//build:workspace.bzl", "CNI_VERSION", "CRI_TOOLS_VERSION") filegroup( name = "rpms", @@ -47,6 +47,8 @@ pkg_rpm( changelog = "//:CHANGELOG.md", data = [ "10-kubeadm.conf", + "50-kubeadm.conf", + "kubeadm.conf", "kubelet.env", "//cmd/kubeadm", ], @@ -64,7 +66,7 @@ pkg_rpm( ], spec_file = "kubernetes-cni.spec", tags = ["manual"], - version_file = "//build:cni_package_version", + version = CNI_VERSION, ) pkg_rpm( diff --git a/build/rpms/cri-tools.spec b/build/rpms/cri-tools.spec index 5dc0489abfe..0f39c06de27 100644 --- a/build/rpms/cri-tools.spec +++ b/build/rpms/cri-tools.spec @@ -11,7 +11,7 @@ Binaries to interface with the container runtime. %prep # This has to be hard coded because bazel does a path substitution before rpm's %{version} is substituted. 
-tar -xzf {crictl-v1.12.0-linux-amd64.tar.gz} +tar -xzf {cri_tools.tgz} %install install -m 755 -d %{buildroot}%{_bindir} diff --git a/build/rpms/kubeadm.spec b/build/rpms/kubeadm.spec index 19a33442fcd..b5dc2c2dac1 100644 --- a/build/rpms/kubeadm.spec +++ b/build/rpms/kubeadm.spec @@ -5,7 +5,7 @@ License: ASL 2.0 Summary: Container Cluster Manager - Kubernetes Cluster Bootstrapping Tool Requires: kubelet >= 1.8.0 Requires: kubectl >= 1.8.0 -Requires: kubernetes-cni >= 0.5.1 +Requires: kubernetes-cni >= 0.6.0 Requires: cri-tools >= 1.11.0 URL: https://kubernetes.io @@ -22,9 +22,9 @@ install -p -m 755 -t %{buildroot}%{_bindir} {kubeadm} install -p -m 644 -t %{buildroot}%{_sysconfdir}/systemd/system/kubelet.service.d/ {10-kubeadm.conf} install -p -m 644 -T {kubelet.env} %{buildroot}%{_sysconfdir}/sysconfig/kubelet mkdir -p %{buildroot}%{_libexecdir}/modules-load.d -mkdir -p %{buildroot}%{_sysctldir} +mkdir -p %{buildroot}/usr/lib/sysctl.d/ install -p -m 0644 -t %{buildroot}%{_libexecdir}/modules-load.d/ {kubeadm.conf} -install -p -m 0644 -t %{buildroot}%{_sysctldir} %{50-kubeadm.conf} +install -p -m 0644 -t %{buildroot}/usr/lib/sysctl.d/ {50-kubeadm.conf} %files %{_bindir}/kubeadm @@ -32,4 +32,4 @@ install -p -m 0644 -t %{buildroot}%{_sysctldir} %{50-kubeadm.conf} %{_sysconfdir}/sysconfig/kubelet %dir %{_libexecdir}/modules-load.d %{_libexecdir}/modules-load.d/kubeadm.conf -%{_sysctldir}/50-kubeadm.conf +/usr/lib/sysctl.d/50-kubeadm.conf diff --git a/build/rpms/kubelet.spec b/build/rpms/kubelet.spec index 40abb09131f..3828f2e2cf0 100644 --- a/build/rpms/kubelet.spec +++ b/build/rpms/kubelet.spec @@ -6,13 +6,14 @@ Summary: Container Cluster Manager - Kubernetes Node Agent URL: https://kubernetes.io -Requires: iptables >= 1.4.21 -Requires: kubernetes-cni >= 0.5.1 -Requires: socat -Requires: util-linux +Requires: conntrack +Requires: ebtables Requires: ethtool Requires: iproute -Requires: ebtables +Requires: iptables >= 1.4.21 +Requires: kubernetes-cni >= 0.6.0 
+Requires: socat +Requires: util-linux %description The node agent of Kubernetes, the container cluster manager. diff --git a/build/rpms/kubernetes-cni.spec b/build/rpms/kubernetes-cni.spec index 4b4751d11cf..e77fdae3b6c 100644 --- a/build/rpms/kubernetes-cni.spec +++ b/build/rpms/kubernetes-cni.spec @@ -3,7 +3,6 @@ Version: OVERRIDE_THIS Release: 00 License: ASL 2.0 Summary: Container Cluster Manager - CNI plugins - URL: https://kubernetes.io %description @@ -11,7 +10,7 @@ Binaries required to provision container networking. %prep mkdir -p ./bin -tar -C ./bin -xz -f {cni-plugins-amd64-v0.6.0.tgz} +tar -C ./bin -xz -f {kubernetes_cni.tgz} %install diff --git a/build/workspace.bzl b/build/workspace.bzl index 162839b6457..edd2af76e70 100644 --- a/build/workspace.bzl +++ b/build/workspace.bzl @@ -13,3 +13,5 @@ # limitations under the License. CRI_TOOLS_VERSION = "1.12.0" + +CNI_VERSION = "0.6.0" diff --git a/cluster/addons/calico-policy-controller/MAINTAINERS.md b/cluster/addons/calico-policy-controller/MAINTAINERS.md deleted file mode 100644 index cd7d55d6518..00000000000 --- a/cluster/addons/calico-policy-controller/MAINTAINERS.md +++ /dev/null @@ -1,6 +0,0 @@ -# Maintainers - -Matt Dupre , Casey Davenport and committers to the https://github.com/projectcalico/k8s-policy repository. - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/calico-policy-controller/MAINTAINERS.md?pixel)]() diff --git a/cluster/addons/calico-policy-controller/README.md b/cluster/addons/calico-policy-controller/README.md index 7ef70eede21..8176fdd83e8 100644 --- a/cluster/addons/calico-policy-controller/README.md +++ b/cluster/addons/calico-policy-controller/README.md @@ -1,11 +1,11 @@ # Calico Policy Controller -============== Calico is an implementation of the Kubernetes network policy API. The provided manifests install: - A DaemonSet which runs Calico on each node in the cluster. - A Deployment which installs the Calico Typha agent. 
- A Service for the Calico Typha agent. +- Horizontal and vertical autoscalers for Calico. ### Learn More diff --git a/cluster/addons/calico-policy-controller/bgpconfigurations-crd.yaml b/cluster/addons/calico-policy-controller/bgpconfigurations-crd.yaml new file mode 100644 index 00000000000..9916794c425 --- /dev/null +++ b/cluster/addons/calico-policy-controller/bgpconfigurations-crd.yaml @@ -0,0 +1,15 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration diff --git a/cluster/addons/calico-policy-controller/calico-clusterrole.yaml b/cluster/addons/calico-policy-controller/calico-clusterrole.yaml index b1b83498d8a..aff9ef7614f 100644 --- a/cluster/addons/calico-policy-controller/calico-clusterrole.yaml +++ b/cluster/addons/calico-policy-controller/calico-clusterrole.yaml @@ -30,13 +30,6 @@ rules: - pods/status verbs: - update - - apiGroups: [""] - resources: - - pods - verbs: - - get - - list - - watch - patch - apiGroups: [""] resources: @@ -46,13 +39,6 @@ rules: - list - update - watch - - apiGroups: ["extensions"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - apiGroups: ["networking.k8s.io"] resources: - networkpolicies @@ -61,10 +47,8 @@ rules: - list - apiGroups: ["crd.projectcalico.org"] resources: - - globalfelixconfigs - felixconfigurations - bgppeers - - globalbgpconfigs - bgpconfigurations - ippools - globalnetworkpolicies @@ -78,3 +62,29 @@ rules: - list - update - watch + # Used in Calico v2.6 only - can be removed after upgrade. 
+ - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + - patch + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - globalbgpconfigs + verbs: + - create + - get + - list + - update + - watch + - apiGroups: ["extensions"] + resources: + - networkpolicies + verbs: + - get + - list + - watch diff --git a/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml b/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml index 96440e74d85..025818768f8 100644 --- a/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml +++ b/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml @@ -33,7 +33,7 @@ spec: # container programs network policy and routes on each # host. - name: calico-node - image: gcr.io/projectcalico-org/node:v2.6.7 + image: gcr.io/projectcalico-org/node:v3.3.1 env: - name: CALICO_DISABLE_FILE_LOGGING value: "true" @@ -73,6 +73,7 @@ spec: httpGet: path: /liveness port: 9099 + host: localhost periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 @@ -80,6 +81,7 @@ spec: httpGet: path: /readiness port: 9099 + host: localhost periodSeconds: 10 volumeMounts: - mountPath: /lib/modules @@ -97,7 +99,7 @@ spec: # This container installs the Calico CNI binaries # and CNI network config file on each node. 
- name: install-cni - image: gcr.io/projectcalico-org/cni:v1.11.2 + image: gcr.io/projectcalico-org/cni:v3.3.1 command: ["/install-cni.sh"] env: - name: CNI_CONF_NAME @@ -110,7 +112,7 @@ spec: "plugins": [ { "type": "calico", - "log_level": "debug", + "log_level": "info", "datastore_type": "kubernetes", "nodename": "__KUBERNETES_NODE_NAME__", "ipam": { @@ -118,11 +120,9 @@ spec: "subnet": "usePodCidr" }, "policy": { - "type": "k8s", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + "type": "k8s" }, "kubernetes": { - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", "kubeconfig": "__KUBECONFIG_FILEPATH__" } }, diff --git a/cluster/addons/calico-policy-controller/typha-deployment.yaml b/cluster/addons/calico-policy-controller/typha-deployment.yaml index ebc6d6dbafc..f8209868ddc 100644 --- a/cluster/addons/calico-policy-controller/typha-deployment.yaml +++ b/cluster/addons/calico-policy-controller/typha-deployment.yaml @@ -23,7 +23,7 @@ spec: hostNetwork: true serviceAccountName: calico containers: - - image: gcr.io/projectcalico-org/typha:v0.5.6 + - image: gcr.io/projectcalico-org/typha:v3.3.1 name: calico-typha ports: - containerPort: 5473 @@ -58,12 +58,14 @@ spec: httpGet: path: /liveness port: 9098 + host: localhost periodSeconds: 30 initialDelaySeconds: 30 readinessProbe: httpGet: path: /readiness port: 9098 + host: localhost periodSeconds: 10 volumes: - name: etc-calico diff --git a/cluster/addons/dns/nodelocaldns/README.md b/cluster/addons/dns/nodelocaldns/README.md index 976e440f768..fc84f783789 100644 --- a/cluster/addons/dns/nodelocaldns/README.md +++ b/cluster/addons/dns/nodelocaldns/README.md @@ -8,6 +8,8 @@ Design details [here](https://github.com/kubernetes/community/blob/master/keps/s This directory contains the addon config yaml - `nodelocaldns.yaml` The variables will be substituted by the configure scripts when the yaml is copied into master. 
+To create a GCE cluster with nodelocaldns enabled, use the command: +`KUBE_ENABLE_NODELOCAL_DNS=true go run hack/e2e.go -v --up` ### Network policy and DNS connectivity @@ -32,4 +34,4 @@ spec: policyTypes: - Ingress - Egress -``` \ No newline at end of file +``` diff --git a/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml b/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml index fdf0f421eac..b0fe946cd46 100644 --- a/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml +++ b/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml @@ -84,7 +84,7 @@ metadata: name: node-local-dns namespace: kube-system labels: - k8s-app: kube-dns + k8s-app: node-local-dns kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: diff --git a/cluster/addons/fluentd-elasticsearch/OWNERS b/cluster/addons/fluentd-elasticsearch/OWNERS index 214b9bdcaf8..8637ca6100c 100644 --- a/cluster/addons/fluentd-elasticsearch/OWNERS +++ b/cluster/addons/fluentd-elasticsearch/OWNERS @@ -1,8 +1,10 @@ approvers: - coffeepac +- monotek - piosz reviewers: - coffeepac +- monotek - piosz labels: - sig/instrumentation diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml index f035b349140..656c92d3848 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml @@ -320,7 +320,7 @@ data: @id journald-container-runtime @type systemd - matches [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }] + matches [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }] @type local persistent true diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml index 88125d1fa3f..bffda046ab6 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml @@ -61,16 +61,18 @@ 
data: # reform.var.log.containers.__-.log tag reform.* read_from_head true - format multi_format - - format json - time_key time - time_format %Y-%m-%dT%H:%M:%S.%NZ - - - format /^(? + + @type multi_format + + format json + time_key time + time_format %Y-%m-%dT%H:%M:%S.%NZ + + + format /^(? + @@ -251,7 +253,7 @@ data: @type systemd - filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }] + filters [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }] pos_file /var/log/gcp-journald-container-runtime.pos read_from_head true tag container-runtime @@ -287,7 +289,7 @@ data: @type grep key _SYSTEMD_UNIT - pattern ^(docker|{{ container_runtime }}|kubelet|node-problem-detector)\.service$ + pattern ^(docker|{{ fluentd_container_runtime_service }}|kubelet|node-problem-detector)\.service$ # END_NODE_JOURNAL diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml index f35eab968c8..549fef75a54 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml @@ -64,16 +64,18 @@ data: # reform.var.log.containers.__-.log tag reform.* read_from_head true - format multi_format - - format json - time_key time - time_format %Y-%m-%dT%H:%M:%S.%NZ - - - format /^(? + + @type multi_format + + format json + time_key time + time_format %Y-%m-%dT%H:%M:%S.%NZ + + + format /^(? 
+ @@ -266,7 +268,7 @@ data: @type systemd - filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }] + filters [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }] pos_file /var/log/gcp-journald-container-runtime.pos read_from_head true tag container-runtime @@ -302,7 +304,7 @@ data: @type grep key _SYSTEMD_UNIT - pattern ^(docker|{{ container_runtime }}|kubelet|node-problem-detector)\.service$ + pattern ^(docker|{{ fluentd_container_runtime_service }}|kubelet|node-problem-detector)\.service$ # END_NODE_JOURNAL diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 4fd5fec4412..37e81cd53e6 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -409,8 +409,8 @@ fi # Fluentd requirements # YAML exists to trigger a configuration refresh when changes are made. -FLUENTD_GCP_YAML_VERSION="v3.1.0" -FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.5-1.5.36-1-k8s}" +FLUENTD_GCP_YAML_VERSION="v3.2.0" +FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}" FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}" FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}" FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}" diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index ad350582a16..aca76802e76 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -177,7 +177,7 @@ ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}" # Useful for scheduling heapster in large clusters with nodes of small size. HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}" -# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.24-1) if you need +# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.3.10-0) if you need # non-default version. ETCD_IMAGE="${TEST_ETCD_IMAGE:-}" ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}" @@ -425,8 +425,8 @@ fi # Fluentd requirements # YAML exists to trigger a configuration refresh when changes are made. 
-FLUENTD_GCP_YAML_VERSION="v3.1.0" -FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.5-1.5.36-1-k8s}" +FLUENTD_GCP_YAML_VERSION="v3.2.0" +FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}" FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}" FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}" FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}" diff --git a/cluster/gce/gci/apiserver_manifest_test.go b/cluster/gce/gci/apiserver_manifest_test.go index db327771b34..450c5648e3f 100644 --- a/cluster/gce/gci/apiserver_manifest_test.go +++ b/cluster/gce/gci/apiserver_manifest_test.go @@ -98,7 +98,7 @@ func TestEncryptionProviderFlag(t *testing.T) { // "-c", - Index 1 // "exec /usr/local/bin/kube-apiserver " - Index 2 execArgsIndex = 2 - encryptionConfigFlag = "--experimental-encryption-provider-config" + encryptionConfigFlag = "--encryption-provider-config" ) testCases := []struct { diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index e95f31598cb..cbef5d438c7 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1089,6 +1089,11 @@ EOF } function create-node-problem-detector-kubeconfig { + local apiserver_address="${1}" + if [[ -z "${apiserver_address}" ]]; then + echo "Must provide API server address to create node-problem-detector kubeconfig file!" + exit 1 + fi echo "Creating node-problem-detector kubeconfig file" mkdir -p /var/lib/node-problem-detector cat </var/lib/node-problem-detector/kubeconfig @@ -1101,6 +1106,7 @@ users: clusters: - name: local cluster: + server: https://${apiserver_address} certificate-authority-data: ${CA_CERT} contexts: - context: @@ -1230,7 +1236,7 @@ function start-node-problem-detector { local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json" # TODO(random-liu): Handle this for alternative container runtime. 
local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json" - local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json" + local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json,${KUBE_HOME}/node-problem-detector/config/systemd-monitor-counter.json,${KUBE_HOME}/node-problem-detector/config/docker-monitor-counter.json" echo "Using node problem detector binary at ${npd_bin}" local flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}" flags+=" --logtostderr" @@ -1815,7 +1821,7 @@ function start-kube-apiserver { # Sets-up etcd encryption. # Configuration of etcd level encryption consists of the following steps: # 1. Writing encryption provider config to disk -# 2. Adding experimental-encryption-provider-config flag to kube-apiserver +# 2. Adding encryption-provider-config flag to kube-apiserver # 3. Add kms-socket-vol and kms-socket-vol-mnt to enable communication with kms-plugin (if requested) # # Expects parameters: @@ -1855,7 +1861,7 @@ function setup-etcd-encryption { encryption_provider_config_path=${ENCRYPTION_PROVIDER_CONFIG_PATH:-/etc/srv/kubernetes/encryption-provider-config.yml} echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode > "${encryption_provider_config_path}" - kube_api_server_params+=" --experimental-encryption-provider-config=${encryption_provider_config_path}" + kube_api_server_params+=" --encryption-provider-config=${encryption_provider_config_path}" default_encryption_provider_config_vol=$(echo "{ \"name\": \"encryptionconfig\", \"hostPath\": {\"path\": \"${encryption_provider_config_path}\", \"type\": \"File\"}}" | base64 | tr -d '\r\n') default_encryption_provider_config_vol_mnt=$(echo "{ \"name\": \"encryptionconfig\", \"mountPath\": \"${encryption_provider_config_path}\", \"readOnly\": true}" | base64 | tr -d '\r\n') @@ -2232,14 +2238,14 @@ function start-fluentd-resource-update { wait-for-apiserver-and-update-fluentd & } -# Update 
{{ container-runtime }} with actual container runtime name, -# and {{ container-runtime-endpoint }} with actual container runtime +# Update {{ fluentd_container_runtime_service }} with actual container runtime name, +# and {{ container_runtime_endpoint }} with actual container runtime # endpoint. function update-container-runtime { local -r file="$1" local -r container_runtime_endpoint="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}" sed -i \ - -e "s@{{ *container_runtime *}}@${CONTAINER_RUNTIME_NAME:-docker}@g" \ + -e "s@{{ *fluentd_container_runtime_service *}}@${FLUENTD_CONTAINER_RUNTIME_SERVICE:-${CONTAINER_RUNTIME_NAME:-docker}}@g" \ -e "s@{{ *container_runtime_endpoint *}}@${container_runtime_endpoint#unix://}@g" \ "${file}" } @@ -2321,10 +2327,10 @@ function setup-fluentd { fluentd_gcp_configmap_name="fluentd-gcp-config-old" fi sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}" - fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.1.0}" + fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.2.0}" sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_yaml}" sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}" - fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.5-1.5.36-1-k8s}" + fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}" sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}" update-daemon-set-prometheus-to-sd-parameters ${fluentd_gcp_yaml} start-fluentd-resource-update ${fluentd_gcp_yaml} @@ -2622,7 +2628,7 @@ function setup-node-termination-handler-manifest { local -r nth_manifest="/etc/kubernetes/$1/$2/daemonset.yaml" if [[ -n "${NODE_TERMINATION_HANDLER_IMAGE}" ]]; then sed -i "s|image:.*|image: ${NODE_TERMINATION_HANDLER_IMAGE}|" "${nth_manifest}" - fi + fi } # Setups manifests for ingress controller and gce-specific policies for service controller. 
@@ -2826,7 +2832,7 @@ function main() { create-kubeproxy-user-kubeconfig fi if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then - create-node-problem-detector-kubeconfig + create-node-problem-detector-kubeconfig ${KUBERNETES_MASTER_NAME} fi fi @@ -2839,8 +2845,10 @@ function main() { if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then compute-master-manifest-variables - start-etcd-servers - start-etcd-empty-dir-cleanup-pod + if [[ -z "${ETCD_SERVERS:-}" ]]; then + start-etcd-servers + start-etcd-empty-dir-cleanup-pod + fi start-kube-apiserver start-kube-controller-manager start-kube-scheduler diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index 80bb0608fab..3050e9d5791 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -26,8 +26,8 @@ set -o pipefail ### Hardcoded constants DEFAULT_CNI_VERSION="v0.6.0" DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f" -DEFAULT_NPD_VERSION="v0.5.0" -DEFAULT_NPD_SHA1="650ecfb2ae495175ee43706d0bd862a1ea7f1395" +DEFAULT_NPD_VERSION="v0.6.0" +DEFAULT_NPD_SHA1="a28e960a21bb74bc0ae09c267b6a340f30e5b3a6" DEFAULT_CRICTL_VERSION="v1.12.0" DEFAULT_CRICTL_SHA1="82ef8b44849f9da0589c87e9865d4716573eec7f" DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571" diff --git a/cluster/gce/manifests/cluster-autoscaler.manifest b/cluster/gce/manifests/cluster-autoscaler.manifest index b4f71ac1b07..049f5202004 100644 --- a/cluster/gce/manifests/cluster-autoscaler.manifest +++ b/cluster/gce/manifests/cluster-autoscaler.manifest @@ -17,7 +17,7 @@ "containers": [ { "name": "cluster-autoscaler", - "image": "k8s.gcr.io/cluster-autoscaler:v1.12.0", + "image": "k8s.gcr.io/cluster-autoscaler:v1.13.0", "livenessProbe": { "httpGet": { "path": "/health-check", diff --git a/cluster/gce/manifests/etcd-empty-dir-cleanup.yaml b/cluster/gce/manifests/etcd-empty-dir-cleanup.yaml index d3be590a888..6795aa23499 100644 --- a/cluster/gce/manifests/etcd-empty-dir-cleanup.yaml +++ 
b/cluster/gce/manifests/etcd-empty-dir-cleanup.yaml @@ -14,4 +14,4 @@ spec: dnsPolicy: Default containers: - name: etcd-empty-dir-cleanup - image: k8s.gcr.io/etcd-empty-dir-cleanup:3.2.24.0 + image: k8s.gcr.io/etcd-empty-dir-cleanup:3.3.10.0 diff --git a/cluster/gce/manifests/etcd.manifest b/cluster/gce/manifests/etcd.manifest index 2649f7a5234..361eeef1508 100644 --- a/cluster/gce/manifests/etcd.manifest +++ b/cluster/gce/manifests/etcd.manifest @@ -14,7 +14,7 @@ "containers":[ { "name": "etcd-container", - "image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.24-1') }}", + "image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.3.10-0') }}", "resources": { "requests": { "cpu": {{ cpulimit }} @@ -30,7 +30,7 @@ "value": "{{ pillar.get('storage_backend', 'etcd3') }}" }, { "name": "TARGET_VERSION", - "value": "{{ pillar.get('etcd_version', '3.2.24') }}" + "value": "{{ pillar.get('etcd_version', '3.3.10') }}" }, { "name": "DATA_DIRECTORY", "value": "/var/etcd/data{{ suffix }}" diff --git a/cluster/gce/upgrade-aliases.sh b/cluster/gce/upgrade-aliases.sh index be27a538413..92b2382074a 100755 --- a/cluster/gce/upgrade-aliases.sh +++ b/cluster/gce/upgrade-aliases.sh @@ -161,8 +161,8 @@ export KUBE_GCE_ENABLE_IP_ALIASES=true export SECONDARY_RANGE_NAME="pods-default" export STORAGE_BACKEND="etcd3" export STORAGE_MEDIA_TYPE="application/vnd.kubernetes.protobuf" -export ETCD_IMAGE=3.2.24-1 -export ETCD_VERSION=3.2.24 +export ETCD_IMAGE=3.3.10-0 +export ETCD_VERSION=3.3.10 # Upgrade master with updated kube envs ${KUBE_ROOT}/cluster/gce/upgrade.sh -M -l diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 79ba5f7db85..c6129198cec 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -2342,7 +2342,6 @@ function create-nodes() { local instances_left=${nodes} - #TODO: parallelize this loop to speed up the process for ((i=1; i<=${NUM_MIGS}; i++)); do local 
group_name="${NODE_INSTANCE_PREFIX}-group-$i" if [[ $i == ${NUM_MIGS} ]]; then @@ -2365,8 +2364,9 @@ function create-nodes() { "${group_name}" \ --zone "${ZONE}" \ --project "${PROJECT}" \ - --timeout "${MIG_WAIT_UNTIL_STABLE_TIMEOUT}" || true; + --timeout "${MIG_WAIT_UNTIL_STABLE_TIMEOUT}" || true & done + wait } # Assumes: diff --git a/cluster/images/etcd-empty-dir-cleanup/Makefile b/cluster/images/etcd-empty-dir-cleanup/Makefile index c2b86977968..8745b6fc17e 100644 --- a/cluster/images/etcd-empty-dir-cleanup/Makefile +++ b/cluster/images/etcd-empty-dir-cleanup/Makefile @@ -14,13 +14,13 @@ .PHONY: build push -ETCD_VERSION = 3.2.24 +ETCD_VERSION = 3.3.10 # Image should be pulled from k8s.gcr.io, which will auto-detect # region (us, eu, asia, ...) and pull from the closest. REGISTRY = k8s.gcr.io # Images should be pushed to staging-k8s.gcr.io. PUSH_REGISTRY = staging-k8s.gcr.io -TAG = 3.2.24.0 +TAG = 3.3.10.0 clean: rm -rf etcdctl etcd-v$(ETCD_VERSION)-linux-amd64 etcd-v$(ETCD_VERSION)-linux-amd64.tar.gz diff --git a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile index 0aa28362918..804347c2a02 100644 --- a/cluster/images/etcd/Makefile +++ b/cluster/images/etcd/Makefile @@ -15,7 +15,7 @@ # Build the etcd image # # Usage: -# [BUNDLED_ETCD_VERSIONS=2.2.1 2.3.7 3.0.17 3.1.12 3.2.24] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push) +# [BUNDLED_ETCD_VERSIONS=2.2.1 2.3.7 3.0.17 3.1.12 3.2.24 3.3.10] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push) # # The image contains different etcd versions to simplify # upgrades. Thus be careful when removing any versions from here. @@ -26,15 +26,15 @@ # Except from etcd-$(version) and etcdctl-$(version) binaries, we also # need etcd and etcdctl binaries for backward compatibility reasons. # That binary will be set to the last version from $(BUNDLED_ETCD_VERSIONS). 
-BUNDLED_ETCD_VERSIONS?=2.2.1 2.3.7 3.0.17 3.1.12 3.2.24 +BUNDLED_ETCD_VERSIONS?=2.2.1 2.3.7 3.0.17 3.1.12 3.2.24 3.3.10 # LATEST_ETCD_VERSION identifies the most recent etcd version available. -LATEST_ETCD_VERSION?=3.2.24 +LATEST_ETCD_VERSION?=3.3.10 # REVISION provides a version number fo this image and all it's bundled # artifacts. It should start at zero for each LATEST_ETCD_VERSION and increment # for each revision of this image at that etcd version. -REVISION?=1 +REVISION?=0 # IMAGE_TAG Uniquely identifies k8s.gcr.io/etcd docker image with a tag of the form "-". IMAGE_TAG=$(LATEST_ETCD_VERSION)-$(REVISION) @@ -52,7 +52,7 @@ MANIFEST_IMAGE := $(PUSH_REGISTRY)/etcd # This option is for running docker manifest command export DOCKER_CLI_EXPERIMENTAL := enabled # golang version should match the golang version from https://github.com/coreos/etcd/releases for the current ETCD_VERSION. -GOLANG_VERSION?=1.8.7 +GOLANG_VERSION?=1.10.4 GOARM=7 TEMP_DIR:=$(shell mktemp -d) @@ -159,7 +159,7 @@ build-integration-test-image: build integration-test: docker run --interactive -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -e GOARCH=$(ARCH) etcd-integration-test \ - /bin/bash -c "CGO_ENABLED=0 go test -tags=integration -v k8s.io/kubernetes/cluster/images/etcd/migrate -args -v 10 -logtostderr true" + /bin/bash -c "CGO_ENABLED=0 go test -tags=integration k8s.io/kubernetes/cluster/images/etcd/migrate -args -v 10 -logtostderr true" integration-build-test: build-integration-test-image integration-test test: unit-test integration-build-test diff --git a/cluster/images/etcd/README.md b/cluster/images/etcd/README.md index 25b4026be77..6c768f9ba18 100644 --- a/cluster/images/etcd/README.md +++ b/cluster/images/etcd/README.md @@ -26,7 +26,7 @@ server. `migrate` writes a `version.txt` file to track the "current" version of etcd that was used to persist data to disk. A "target" version may also be provided -by the `TARGET_STORAGE` (e.g. "etcd3") and `TARGET_VERSION` (e.g. 
"3.2.24" ) +by the `TARGET_STORAGE` (e.g. "etcd3") and `TARGET_VERSION` (e.g. "3.3.10" ) environment variables. If the persisted version differs from the target version, `migrate-if-needed.sh` will migrate the data from the current to the target version. diff --git a/cluster/images/etcd/migrate-if-needed.sh b/cluster/images/etcd/migrate-if-needed.sh index 38e2565fcf1..1998e237dff 100755 --- a/cluster/images/etcd/migrate-if-needed.sh +++ b/cluster/images/etcd/migrate-if-needed.sh @@ -18,7 +18,7 @@ # This script performs etcd upgrade based on the following environmental # variables: # TARGET_STORAGE - API of etcd to be used (supported: 'etcd2', 'etcd3') -# TARGET_VERSION - etcd release to be used (supported: '2.2.1', '2.3.7', '3.0.17', '3.1.12', '3.2.24') +# TARGET_VERSION - etcd release to be used (supported: '2.2.1', '2.3.7', '3.0.17', '3.1.12', '3.2.24', "3.3.10") # DATA_DIRECTORY - directory with etcd data # # The current etcd version and storage format is detected based on the @@ -30,6 +30,7 @@ # - 2.3.7/etcd2 -> 3.0.17/etcd2 # - 3.0.17/etcd3 -> 3.1.12/etcd3 # - 3.1.12/etcd3 -> 3.2.24/etcd3 +# - 3.2.24/etcd3 -> 3.3.10/etcd3 # # NOTE: The releases supported in this script has to match release binaries # present in the etcd image (to make this script work correctly). @@ -42,7 +43,7 @@ set -o nounset # NOTE: BUNDLED_VERSION has to match release binaries present in the # etcd image (to make this script work correctly). 
-BUNDLED_VERSIONS="2.2.1, 2.3.7, 3.0.17, 3.1.12, 3.2.24" +BUNDLED_VERSIONS="2.2.1, 2.3.7, 3.0.17, 3.1.12, 3.2.24, 3.3.10" ETCD_NAME="${ETCD_NAME:-etcd-$(hostname)}" if [ -z "${DATA_DIRECTORY:-}" ]; then diff --git a/cluster/images/etcd/migrate/data_dir.go b/cluster/images/etcd/migrate/data_dir.go index 3052afa735d..7b25d7acd59 100644 --- a/cluster/images/etcd/migrate/data_dir.go +++ b/cluster/images/etcd/migrate/data_dir.go @@ -122,7 +122,7 @@ type VersionFile struct { path string } -// Exists returns true if a version.txt file exists on the filesystem. +// Exists returns true if a version.txt file exists on the file system. func (v *VersionFile) Exists() (bool, error) { return exists(v.path) } diff --git a/cluster/kubemark/gce/config-default.sh b/cluster/kubemark/gce/config-default.sh index e80feb79bd1..9f0f9669257 100644 --- a/cluster/kubemark/gce/config-default.sh +++ b/cluster/kubemark/gce/config-default.sh @@ -66,6 +66,8 @@ ETCD_COMPACTION_INTERVAL_SEC="${KUBEMARK_ETCD_COMPACTION_INTERVAL_SEC:-}" # non-default version. ETCD_IMAGE="${TEST_ETCD_IMAGE:-}" ETCD_VERSION="${TEST_ETCD_VERSION:-}" +ETCD_SERVERS="${KUBEMARK_ETCD_SERVERS:-}" +ETCD_SERVERS_OVERRIDES="${KUBEMARK_ETCD_SERVERS_OVERRIDES:-}" # Storage backend. 'etcd2' and 'etcd3' are supported. 
STORAGE_BACKEND=${STORAGE_BACKEND:-} diff --git a/cmd/cloud-controller-manager/BUILD b/cmd/cloud-controller-manager/BUILD index e9f571902c3..e76c21f2211 100644 --- a/cmd/cloud-controller-manager/BUILD +++ b/cmd/cloud-controller-manager/BUILD @@ -23,9 +23,7 @@ go_library( "//pkg/client/metrics/prometheus:go_default_library", "//pkg/cloudprovider/providers:go_default_library", "//pkg/version/prometheus:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", ], ) diff --git a/cmd/cloud-controller-manager/app/BUILD b/cmd/cloud-controller-manager/app/BUILD index 78e517a9b30..75d7f8b3351 100644 --- a/cmd/cloud-controller-manager/app/BUILD +++ b/cmd/cloud-controller-manager/app/BUILD @@ -9,6 +9,7 @@ go_library( "//cmd/cloud-controller-manager/app/config:go_default_library", "//cmd/cloud-controller-manager/app/options:go_default_library", "//cmd/controller-manager/app:go_default_library", + "//cmd/controller-manager/app/options:go_default_library", "//pkg/controller/cloud:go_default_library", "//pkg/controller/route:go_default_library", "//pkg/controller/service:go_default_library", @@ -21,6 +22,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library", "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index 99cf6741926..73f99ecf778 100644 --- 
a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -25,20 +25,22 @@ import ( "time" "github.com/spf13/cobra" - "k8s.io/klog" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" apiserverflag "k8s.io/apiserver/pkg/util/flag" + "k8s.io/apiserver/pkg/util/globalflag" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" cloudcontrollerconfig "k8s.io/kubernetes/cmd/cloud-controller-manager/app/config" "k8s.io/kubernetes/cmd/cloud-controller-manager/app/options" genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app" + cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" cloudcontrollers "k8s.io/kubernetes/pkg/controller/cloud" routecontroller "k8s.io/kubernetes/pkg/controller/route" servicecontroller "k8s.io/kubernetes/pkg/controller/service" @@ -86,6 +88,9 @@ the cloud specific control loops shipped with Kubernetes.`, fs := cmd.Flags() namedFlagSets := s.Flags() + verflag.AddFlags(namedFlagSets.FlagSet("global")) + globalflag.AddGlobalFlags(namedFlagSets.FlagSet("global"), cmd.Name()) + cmoptions.AddCustomGlobalFlags(namedFlagSets.FlagSet("generic")) for _, f := range namedFlagSets.FlagSets { fs.AddFlagSet(f) } diff --git a/cmd/cloud-controller-manager/app/options/options.go b/cmd/cloud-controller-manager/app/options/options.go index 27c60157a29..5e2a5979493 100644 --- a/cmd/cloud-controller-manager/app/options/options.go +++ b/cmd/cloud-controller-manager/app/options/options.go @@ -143,7 +143,7 @@ func (o *CloudControllerManagerOptions) Flags() apiserverflag.NamedFlagSets { fs.StringVar(&o.Kubeconfig, "kubeconfig", o.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") fs.DurationVar(&o.NodeStatusUpdateFrequency.Duration, 
"node-status-update-frequency", o.NodeStatusUpdateFrequency.Duration, "Specifies how often the controller updates nodes' status.") - utilfeature.DefaultFeatureGate.AddFlag(fss.FlagSet("generic")) + utilfeature.DefaultMutableFeatureGate.AddFlag(fss.FlagSet("generic")) return fss } diff --git a/cmd/cloud-controller-manager/app/testing/testserver.go b/cmd/cloud-controller-manager/app/testing/testserver.go index 7f470ac38d7..e5a3e9e5bad 100644 --- a/cmd/cloud-controller-manager/app/testing/testserver.go +++ b/cmd/cloud-controller-manager/app/testing/testserver.go @@ -113,9 +113,10 @@ func StartTestServer(t Logger, customFlags []string) (result TestServer, err err return result, fmt.Errorf("failed to create config from options: %v", err) } + errCh := make(chan error) go func(stopCh <-chan struct{}) { if err := app.Run(config.Complete(), stopCh); err != nil { - t.Errorf("cloud-apiserver failed run: %v", err) + errCh <- err } }(stopCh) @@ -125,6 +126,12 @@ func StartTestServer(t Logger, customFlags []string) (result TestServer, err err return result, fmt.Errorf("failed to create a client: %v", err) } err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) { + select { + case err := <-errCh: + return false, err + default: + } + result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do() status := 0 result.StatusCode(&status) diff --git a/cmd/cloud-controller-manager/controller-manager.go b/cmd/cloud-controller-manager/controller-manager.go index 39eacd52d61..280069e4d8f 100644 --- a/cmd/cloud-controller-manager/controller-manager.go +++ b/cmd/cloud-controller-manager/controller-manager.go @@ -20,13 +20,11 @@ limitations under the License. 
package main import ( - goflag "flag" "fmt" "math/rand" "os" "time" - utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/apiserver/pkg/util/logs" "k8s.io/kubernetes/cmd/cloud-controller-manager/app" _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration @@ -34,8 +32,6 @@ import ( // implementing an out-of-tree cloud-provider. _ "k8s.io/kubernetes/pkg/cloudprovider/providers" _ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration - - "github.com/spf13/pflag" ) func main() { @@ -46,8 +42,6 @@ func main() { // TODO: once we switch everything over to Cobra commands, we can go back to calling // utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the // normalize func and add the go flag set by hand. - pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) // utilflag.InitFlags() logs.InitLogs() defer logs.FlushLogs() diff --git a/cmd/controller-manager/app/options/BUILD b/cmd/controller-manager/app/options/BUILD index 81939e51df2..aa5744e335a 100644 --- a/cmd/controller-manager/app/options/BUILD +++ b/cmd/controller-manager/app/options/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -6,6 +6,7 @@ go_library( "cloudprovider.go", "debugging.go", "generic.go", + "globalflags.go", "kubecloudshared.go", "servicecontroller.go", ], @@ -13,12 +14,14 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/client/leaderelectionconfig:go_default_library", + "//pkg/cloudprovider/providers:go_default_library", "//pkg/controller/apis/config:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", 
"//staging/src/k8s.io/apiserver/pkg/apis/config:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", ], ) @@ -36,3 +39,14 @@ filegroup( tags = ["automanaged"], visibility = ["//visibility:public"], ) + +go_test( + name = "go_default_test", + srcs = ["globalflags_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + ], +) diff --git a/cmd/controller-manager/app/options/globalflags.go b/cmd/controller-manager/app/options/globalflags.go new file mode 100644 index 00000000000..b7a05ee2b06 --- /dev/null +++ b/cmd/controller-manager/app/options/globalflags.go @@ -0,0 +1,35 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "github.com/spf13/pflag" + + "k8s.io/apiserver/pkg/util/globalflag" + + // ensure libs have a chance to globally register their flags + _ "k8s.io/kubernetes/pkg/cloudprovider/providers" +) + +// AddCustomGlobalFlags explicitly registers flags that internal packages register +// against the global flagsets from "flag". We do this in order to prevent +// unwanted flags from leaking into the *-controller-manager's flagset. 
+func AddCustomGlobalFlags(fs *pflag.FlagSet) { + // lookup flags in global flag set and re-register the values with our flagset + // adds flags from k8s.io/kubernetes/pkg/cloudprovider/providers + globalflag.Register(fs, "cloud-provider-gce-lb-src-cidrs") +} diff --git a/cmd/controller-manager/app/options/globalflags_test.go b/cmd/controller-manager/app/options/globalflags_test.go new file mode 100644 index 00000000000..4c27d0223f7 --- /dev/null +++ b/cmd/controller-manager/app/options/globalflags_test.go @@ -0,0 +1,60 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "flag" + "reflect" + "sort" + "strings" + "testing" + + "github.com/spf13/pflag" + + apiserverflag "k8s.io/apiserver/pkg/util/flag" + "k8s.io/apiserver/pkg/util/globalflag" +) + +func TestAddCustomGlobalFlags(t *testing.T) { + namedFlagSets := &apiserverflag.NamedFlagSets{} + // Note that we will register all flags (including klog flags) into the same + // flag set. This allows us to test against all global flags from + // flags.CommandLine. + nfs := namedFlagSets.FlagSet("generic") + globalflag.AddGlobalFlags(nfs, "test-cmd") + AddCustomGlobalFlags(nfs) + + actualFlag := []string{} + nfs.VisitAll(func(flag *pflag.Flag) { + actualFlag = append(actualFlag, flag.Name) + }) + + // Get all flags from flags.CommandLine, except flag `test.*`. 
+ wantedFlag := []string{"help"} + pflag.CommandLine.SetNormalizeFunc(apiserverflag.WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.VisitAll(func(flag *pflag.Flag) { + if !strings.Contains(flag.Name, "test.") { + wantedFlag = append(wantedFlag, flag.Name) + } + }) + sort.Strings(wantedFlag) + + if !reflect.DeepEqual(wantedFlag, actualFlag) { + t.Errorf("Got different flags than expected: expected %+v, got %+v", wantedFlag, actualFlag) + } +} diff --git a/cmd/hyperkube/main.go b/cmd/hyperkube/main.go index 8cb1ec2ad42..617050a0cd1 100644 --- a/cmd/hyperkube/main.go +++ b/cmd/hyperkube/main.go @@ -85,7 +85,7 @@ func commandFor(basename string, defaultCommand *cobra.Command, commands []func( // NewHyperKubeCommand is the entry point for hyperkube func NewHyperKubeCommand(stopCh <-chan struct{}) (*cobra.Command, []func() *cobra.Command) { - // these have to be functions since the command is polymorphic. Cobra wants you to be top level + // these have to be functions since the command is polymorphic. Cobra wants you to be top level // command to get executed apiserver := func() *cobra.Command { ret := kubeapiserver.NewAPIServerCommand(stopCh) diff --git a/cmd/kube-apiserver/BUILD b/cmd/kube-apiserver/BUILD index 8b8d0438d18..5d7baa2632f 100644 --- a/cmd/kube-apiserver/BUILD +++ b/cmd/kube-apiserver/BUILD @@ -23,9 +23,7 @@ go_library( "//pkg/client/metrics/prometheus:go_default_library", "//pkg/version/prometheus:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", ], ) diff --git a/cmd/kube-apiserver/apiserver.go b/cmd/kube-apiserver/apiserver.go index 22148596e66..a73077593ea 100644 --- a/cmd/kube-apiserver/apiserver.go +++ b/cmd/kube-apiserver/apiserver.go @@ -19,16 +19,12 @@ limitations under the License. 
package main import ( - goflag "flag" "fmt" "math/rand" "os" "time" - "github.com/spf13/pflag" - "k8s.io/apiserver/pkg/server" - utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/apiserver/pkg/util/logs" "k8s.io/kubernetes/cmd/kube-apiserver/app" _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration @@ -43,8 +39,6 @@ func main() { // TODO: once we switch everything over to Cobra commands, we can go back to calling // utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the // normalize func and add the go flag set by hand. - pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) // utilflag.InitFlags() logs.InitLogs() defer logs.FlushLogs() diff --git a/cmd/kube-apiserver/app/BUILD b/cmd/kube-apiserver/app/BUILD index 8ec3a3c952d..a2098f5b418 100644 --- a/cmd/kube-apiserver/app/BUILD +++ b/cmd/kube-apiserver/app/BUILD @@ -57,6 +57,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/storage/etcd3/preflight:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/cmd/kube-apiserver/app/options/BUILD b/cmd/kube-apiserver/app/options/BUILD index 770d36dbb8e..0fe1c6159f6 100644 --- a/cmd/kube-apiserver/app/options/BUILD +++ b/cmd/kube-apiserver/app/options/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = [ + "globalflags.go", "options.go", "validation.go", ], @@ -16,6 +17,7 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", + "//pkg/cloudprovider/providers:go_default_library", 
"//pkg/features:go_default_library", "//pkg/kubeapiserver/options:go_default_library", "//pkg/kubelet/client:go_default_library", @@ -24,17 +26,23 @@ go_library( "//pkg/serviceaccount:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/apiserver/scheme:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", ], ) go_test( name = "go_default_test", - srcs = ["options_test.go"], + srcs = [ + "globalflags_test.go", + "options_test.go", + ], embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", @@ -46,6 +54,7 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library", "//staging/src/k8s.io/apiserver/plugin/pkg/audit/buffered:go_default_library", "//staging/src/k8s.io/apiserver/plugin/pkg/audit/truncate:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", diff --git a/cmd/kube-apiserver/app/options/globalflags.go b/cmd/kube-apiserver/app/options/globalflags.go new file mode 100644 index 00000000000..96c955ac0bd --- /dev/null +++ b/cmd/kube-apiserver/app/options/globalflags.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "github.com/spf13/pflag" + + "k8s.io/apiserver/pkg/util/globalflag" + + // ensure libs have a chance to globally register their flags + _ "k8s.io/apiserver/pkg/admission" + _ "k8s.io/kubernetes/pkg/cloudprovider/providers" +) + +// AddCustomGlobalFlags explicitly registers flags that internal packages register +// against the global flagsets from "flag". We do this in order to prevent +// unwanted flags from leaking into the kube-apiserver's flagset. +func AddCustomGlobalFlags(fs *pflag.FlagSet) { + // Lookup flags in global flag set and re-register the values with our flagset. + + // Adds flags from k8s.io/kubernetes/pkg/cloudprovider/providers. + globalflag.Register(fs, "cloud-provider-gce-lb-src-cidrs") + + // Adds flags from k8s.io/apiserver/pkg/admission. + globalflag.Register(fs, "default-not-ready-toleration-seconds") + globalflag.Register(fs, "default-unreachable-toleration-seconds") +} diff --git a/cmd/kube-apiserver/app/options/globalflags_test.go b/cmd/kube-apiserver/app/options/globalflags_test.go new file mode 100644 index 00000000000..0c3c1b843b3 --- /dev/null +++ b/cmd/kube-apiserver/app/options/globalflags_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "flag" + "reflect" + "sort" + "strings" + "testing" + + "github.com/spf13/pflag" + + apiserverflag "k8s.io/apiserver/pkg/util/flag" + "k8s.io/apiserver/pkg/util/globalflag" +) + +func TestAddCustomGlobalFlags(t *testing.T) { + namedFlagSets := &apiserverflag.NamedFlagSets{} + + // Note that we will register all flags (including klog flags) into the same + // flag set. This allows us to test against all global flags from + // flags.CommandLine. + nfs := namedFlagSets.FlagSet("test") + globalflag.AddGlobalFlags(nfs, "test-cmd") + AddCustomGlobalFlags(nfs) + + actualFlag := []string{} + nfs.VisitAll(func(flag *pflag.Flag) { + actualFlag = append(actualFlag, flag.Name) + }) + + // Get all flags from flags.CommandLine, except flag `test.*`. 
+ wantedFlag := []string{"help"} + pflag.CommandLine.SetNormalizeFunc(apiserverflag.WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.VisitAll(func(flag *pflag.Flag) { + if !strings.Contains(flag.Name, "test.") { + wantedFlag = append(wantedFlag, flag.Name) + } + }) + sort.Strings(wantedFlag) + + if !reflect.DeepEqual(wantedFlag, actualFlag) { + t.Errorf("[Default]: expected %+v, got %+v", wantedFlag, actualFlag) + } +} diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index e913571f9b5..0151fa1b7fb 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -50,6 +50,7 @@ import ( serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/apiserver/pkg/storage/etcd3/preflight" apiserverflag "k8s.io/apiserver/pkg/util/flag" + "k8s.io/apiserver/pkg/util/globalflag" "k8s.io/apiserver/pkg/util/webhook" clientgoinformers "k8s.io/client-go/informers" clientgoclientset "k8s.io/client-go/kubernetes" @@ -117,6 +118,9 @@ cluster's shared state through which all other components interact.`, fs := cmd.Flags() namedFlagSets := s.Flags() + verflag.AddFlags(namedFlagSets.FlagSet("global")) + globalflag.AddGlobalFlags(namedFlagSets.FlagSet("global"), cmd.Name()) + options.AddCustomGlobalFlags(namedFlagSets.FlagSet("generic")) for _, f := range namedFlagSets.FlagSets { fs.AddFlagSet(f) } diff --git a/cmd/kube-apiserver/app/testing/testserver.go b/cmd/kube-apiserver/app/testing/testserver.go index 21e13576fc2..d9cde3599de 100644 --- a/cmd/kube-apiserver/app/testing/testserver.go +++ b/cmd/kube-apiserver/app/testing/testserver.go @@ -145,9 +145,10 @@ func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, custo if err != nil { return result, fmt.Errorf("failed to create server chain: %v", err) } + errCh := make(chan error) go func(stopCh <-chan struct{}) { if err := server.PrepareRun().Run(stopCh); err != nil { - t.Errorf("kube-apiserver failed run: %v", err) + errCh <- err 
} }(stopCh) @@ -158,6 +159,12 @@ func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, custo return result, fmt.Errorf("failed to create a client: %v", err) } err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) { + select { + case err := <-errCh: + return false, err + default: + } + result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do() status := 0 result.StatusCode(&status) diff --git a/cmd/kube-controller-manager/BUILD b/cmd/kube-controller-manager/BUILD index 20874148aa1..e21b302681b 100644 --- a/cmd/kube-controller-manager/BUILD +++ b/cmd/kube-controller-manager/BUILD @@ -24,9 +24,7 @@ go_library( "//pkg/util/reflector/prometheus:go_default_library", "//pkg/util/workqueue/prometheus:go_default_library", "//pkg/version/prometheus:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", ], ) diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index 2fa0f43dc78..f4fdeb6af49 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -20,6 +20,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//cmd/controller-manager/app:go_default_library", + "//cmd/controller-manager/app/options:go_default_library", "//cmd/kube-controller-manager/app/config:go_default_library", "//cmd/kube-controller-manager/app/options:go_default_library", "//pkg/apis/apps/install:go_default_library", @@ -117,6 +118,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/server/mux:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library", "//staging/src/k8s.io/client-go/discovery/cached:go_default_library", 
"//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 5e686092194..2cf1ddf932f 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -30,7 +30,6 @@ import ( "time" "github.com/spf13/cobra" - "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -41,6 +40,7 @@ import ( "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/mux" apiserverflag "k8s.io/apiserver/pkg/util/flag" + "k8s.io/apiserver/pkg/util/globalflag" cacheddiscovery "k8s.io/client-go/discovery/cached" "k8s.io/client-go/informers" restclient "k8s.io/client-go/rest" @@ -49,7 +49,9 @@ import ( "k8s.io/client-go/tools/leaderelection/resourcelock" certutil "k8s.io/client-go/util/cert" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app" + cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" "k8s.io/kubernetes/cmd/kube-controller-manager/app/config" "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" "k8s.io/kubernetes/pkg/controller" @@ -112,6 +114,9 @@ controller, and serviceaccounts controller.`, fs := cmd.Flags() namedFlagSets := s.Flags(KnownControllers(), ControllersDisabledByDefault.List()) + verflag.AddFlags(namedFlagSets.FlagSet("global")) + globalflag.AddGlobalFlags(namedFlagSets.FlagSet("global"), cmd.Name()) + cmoptions.AddCustomGlobalFlags(namedFlagSets.FlagSet("generic")) for _, f := range namedFlagSets.FlagSets { fs.AddFlagSet(f) } diff --git a/cmd/kube-controller-manager/app/options/BUILD b/cmd/kube-controller-manager/app/options/BUILD index 9d14caa78f7..3f3330f7dfa 100644 --- a/cmd/kube-controller-manager/app/options/BUILD +++ 
b/cmd/kube-controller-manager/app/options/BUILD @@ -34,20 +34,19 @@ go_library( deps = [ "//cmd/controller-manager/app/options:go_default_library", "//cmd/kube-controller-manager/app/config:go_default_library", - "//pkg/api/legacyscheme:go_default_library", "//pkg/controller/apis/config:go_default_library", - "//pkg/controller/apis/config/v1alpha1:go_default_library", + "//pkg/controller/apis/config/scheme:go_default_library", "//pkg/controller/garbagecollector:go_default_library", "//pkg/features:go_default_library", "//pkg/master/ports:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go index a93d3fd8fae..21f6dc050eb 100644 --- a/cmd/kube-controller-manager/app/options/options.go +++ b/cmd/kube-controller-manager/app/options/options.go @@ -23,12 +23,12 @@ import ( "net" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" utilerrors "k8s.io/apimachinery/pkg/util/errors" apiserveroptions "k8s.io/apiserver/pkg/server/options" utilfeature "k8s.io/apiserver/pkg/util/feature" apiserverflag "k8s.io/apiserver/pkg/util/flag" clientset "k8s.io/client-go/kubernetes" + 
clientgokubescheme "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -36,9 +36,8 @@ import ( kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1" cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" kubecontrollerconfig "k8s.io/kubernetes/cmd/kube-controller-manager/app/config" - "k8s.io/kubernetes/pkg/api/legacyscheme" kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config" - kubectrlmgrschemev1alpha1 "k8s.io/kubernetes/pkg/controller/apis/config/v1alpha1" + kubectrlmgrconfigscheme "k8s.io/kubernetes/pkg/controller/apis/config/scheme" "k8s.io/kubernetes/pkg/controller/garbagecollector" "k8s.io/kubernetes/pkg/master/ports" @@ -207,19 +206,11 @@ func NewKubeControllerManagerOptions() (*KubeControllerManagerOptions, error) { // NewDefaultComponentConfig returns kube-controller manager configuration object. func NewDefaultComponentConfig(insecurePort int32) (kubectrlmgrconfig.KubeControllerManagerConfiguration, error) { - scheme := runtime.NewScheme() - if err := kubectrlmgrschemev1alpha1.AddToScheme(scheme); err != nil { - return kubectrlmgrconfig.KubeControllerManagerConfiguration{}, err - } - if err := kubectrlmgrconfig.AddToScheme(scheme); err != nil { - return kubectrlmgrconfig.KubeControllerManagerConfiguration{}, err - } - versioned := kubectrlmgrconfigv1alpha1.KubeControllerManagerConfiguration{} - scheme.Default(&versioned) + kubectrlmgrconfigscheme.Scheme.Default(&versioned) internal := kubectrlmgrconfig.KubeControllerManagerConfiguration{} - if err := scheme.Convert(&versioned, &internal, nil); err != nil { + if err := kubectrlmgrconfigscheme.Scheme.Convert(&versioned, &internal, nil); err != nil { return internal, err } internal.Generic.Port = insecurePort @@ -261,7 +252,7 @@ func (s *KubeControllerManagerOptions) Flags(allControllers []string, disabledBy fs := fss.FlagSet("misc") fs.StringVar(&s.Master, 
"master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig).") fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") - utilfeature.DefaultFeatureGate.AddFlag(fss.FlagSet("generic")) + utilfeature.DefaultMutableFeatureGate.AddFlag(fss.FlagSet("generic")) return fss } @@ -440,6 +431,5 @@ func createRecorder(kubeClient clientset.Interface, userAgent string) record.Eve eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) - // TODO: remove dependency on the legacyscheme - return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: userAgent}) + return eventBroadcaster.NewRecorder(clientgokubescheme.Scheme, v1.EventSource{Component: userAgent}) } diff --git a/cmd/kube-controller-manager/app/testing/testserver.go b/cmd/kube-controller-manager/app/testing/testserver.go index 34ba72c5dbf..b864e120ea1 100644 --- a/cmd/kube-controller-manager/app/testing/testserver.go +++ b/cmd/kube-controller-manager/app/testing/testserver.go @@ -114,9 +114,10 @@ func StartTestServer(t Logger, customFlags []string) (result TestServer, err err return result, fmt.Errorf("failed to create config from options: %v", err) } + errCh := make(chan error) go func(stopCh <-chan struct{}) { if err := app.Run(config.Complete(), stopCh); err != nil { - t.Errorf("kube-apiserver failed run: %v", err) + errCh <- err } }(stopCh) @@ -126,6 +127,12 @@ func StartTestServer(t Logger, customFlags []string) (result TestServer, err err return result, fmt.Errorf("failed to create a client: %v", err) } err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) { + select { + case err := <-errCh: + return false, err + default: + } + result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do() status := 0 
result.StatusCode(&status) diff --git a/cmd/kube-controller-manager/controller-manager.go b/cmd/kube-controller-manager/controller-manager.go index 8dd1f29883f..7ae1192f97d 100644 --- a/cmd/kube-controller-manager/controller-manager.go +++ b/cmd/kube-controller-manager/controller-manager.go @@ -21,15 +21,11 @@ limitations under the License. package main import ( - goflag "flag" "fmt" "math/rand" "os" "time" - "github.com/spf13/pflag" - - utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/apiserver/pkg/util/logs" "k8s.io/kubernetes/cmd/kube-controller-manager/app" _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration @@ -46,8 +42,6 @@ func main() { // TODO: once we switch everything over to Cobra commands, we can go back to calling // utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the // normalize func and add the go flag set by hand. - pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) // utilflag.InitFlags() logs.InitLogs() defer logs.FlushLogs() diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 04189b98d30..e1774499657 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -208,7 +208,7 @@ func (o *Options) Complete() error { return err } - if err := utilfeature.DefaultFeatureGate.SetFromMap(o.config.FeatureGates); err != nil { + if err := utilfeature.DefaultMutableFeatureGate.SetFromMap(o.config.FeatureGates); err != nil { return err } diff --git a/cmd/kube-scheduler/app/options/insecure_serving.go b/cmd/kube-scheduler/app/options/insecure_serving.go index db3c7351100..6f6f93591d2 100644 --- a/cmd/kube-scheduler/app/options/insecure_serving.go +++ b/cmd/kube-scheduler/app/options/insecure_serving.go @@ -157,7 +157,7 @@ func (o *CombinedInsecureServingOptions) Validate() []error { } if len(o.BindAddress) > 0 && net.ParseIP(o.BindAddress) == nil { - errors = append(errors, 
fmt.Errorf("--address has no valid IP address")) + errors = append(errors, fmt.Errorf("--address %v is an invalid IP address", o.BindAddress)) } return errors diff --git a/cmd/kube-scheduler/app/options/options.go b/cmd/kube-scheduler/app/options/options.go index 15c79acd7c5..972a6b7f09b 100644 --- a/cmd/kube-scheduler/app/options/options.go +++ b/cmd/kube-scheduler/app/options/options.go @@ -104,6 +104,7 @@ func NewOptions() (*Options, error) { }, } + o.Authentication.TolerateInClusterLookupFailure = true o.Authentication.RemoteKubeConfigFileOptional = true o.Authorization.RemoteKubeConfigFileOptional = true o.Authorization.AlwaysAllowPaths = []string{"/healthz"} @@ -152,7 +153,7 @@ func (o *Options) Flags() (nfs apiserverflag.NamedFlagSets) { o.Deprecated.AddFlags(nfs.FlagSet("deprecated"), &o.ComponentConfig) leaderelectionconfig.BindFlags(&o.ComponentConfig.LeaderElection.LeaderElectionConfiguration, nfs.FlagSet("leader election")) - utilfeature.DefaultFeatureGate.AddFlag(nfs.FlagSet("feature gate")) + utilfeature.DefaultMutableFeatureGate.AddFlag(nfs.FlagSet("feature gate")) return nfs } diff --git a/cmd/kubeadm/.import-restrictions b/cmd/kubeadm/.import-restrictions index 988c92e551e..b2ceea03ec9 100644 --- a/cmd/kubeadm/.import-restrictions +++ b/cmd/kubeadm/.import-restrictions @@ -71,7 +71,7 @@ "k8s.io/kubernetes/pkg/registry/core/service/ipallocator", "k8s.io/kubernetes/pkg/scheduler/algorithm", "k8s.io/kubernetes/pkg/scheduler/api", - "k8s.io/kubernetes/pkg/scheduler/cache", + "k8s.io/kubernetes/pkg/scheduler/nodeinfo", "k8s.io/kubernetes/pkg/scheduler/internal/cache", "k8s.io/kubernetes/pkg/scheduler/util", "k8s.io/kubernetes/pkg/security/apparmor", diff --git a/cmd/kubeadm/OWNERS b/cmd/kubeadm/OWNERS index 6a0eda43c3b..7a6ea030db8 100644 --- a/cmd/kubeadm/OWNERS +++ b/cmd/kubeadm/OWNERS @@ -2,19 +2,20 @@ approvers: - luxas - timothysc - fabriziopandini +- neolit123 reviewers: - luxas - timothysc - fabriziopandini +- neolit123 - kad -- xiangpengzhao -- 
stealthybox - liztio - chuckha - detiber - dixudx -- neolit123 -# Might want to add @kargakis, @jamiehannaford, @krousey and/or @dmmcquay back in the future +- rosti +- yagonobre +# Might want to add @xiangpengzhao and @stealthybox back in the future labels: - area/kubeadm - sig/cluster-lifecycle diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go b/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go index 892c12bfbcd..e88276027bb 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go @@ -19,11 +19,11 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm -// Package v1beta1 defines the v1beta1 version of the kubeadm config file format. -// This version graduates the kubeadm config to BETA and is a big step towards GA. +// Package v1beta1 defines the v1beta1 version of the kubeadm configuration file format. +// This version graduates the configuration format to BETA and is a big step towards GA. // //A list of changes since v1alpha3: -// - "apiServerEndpoint" in InitConfiguration was renamed to "localAPIServerEndpoint" for better clarity of what the field +// - "apiServerEndpoint" in InitConfiguration was renamed to "localAPIEndpoint" for better clarity of what the field // represents. // - Common fields in ClusterConfiguration such as "*extraArgs" and "*extraVolumes" for control plane components are now moved // under component structs - i.e. "apiServer", "controllerManager", "scheduler". @@ -33,7 +33,7 @@ limitations under the License. // - "featureGates" still exists under ClusterConfiguration, but there are no supported feature gates in 1.13. // See the Kubernetes 1.13 changelog for further details. // - Both "localEtcd" and "dns" configurations now support custom image repositories. -// - the "controlPlane*" related fields in JoinConfiguration were refactored into a sub structure. 
+// - The "controlPlane*"-related fields in JoinConfiguration were refactored into a sub-structure. // - "clusterName" was removed from JoinConfiguration and the name is now fetched from the existing cluster. // // Migration from old kubeadm config versions @@ -53,27 +53,26 @@ limitations under the License. // // A kubeadm config file could contain multiple configuration types separated using three dashes (“---”). // -// The kubeadm config print-defaults command print the default values for all the kubeadm supported configuration types. +// kubeadm supports the following configuration types: // // apiVersion: kubeadm.k8s.io/v1beta1 // kind: InitConfiguration -// ... -// --- +// // apiVersion: kubeadm.k8s.io/v1beta1 // kind: ClusterConfiguration -// ... -// --- +// // apiVersion: kubelet.config.k8s.io/v1beta1 // kind: KubeletConfiguration -// ... -// --- +// // apiVersion: kubeproxy.config.k8s.io/v1alpha1 // kind: KubeProxyConfiguration -// ... -// --- +// // apiVersion: kubeadm.k8s.io/v1beta1 // kind: JoinConfiguration -// ... +// +// To print the defaults for "init" and "join" actions use the following commands: +// kubeadm config print init-defaults +// kubeadm config print join-defaults // // The list of configuration types that must be included in a configuration file depends by the action you are // performing (init or join) and by the configuration options you are going to use (defaults or advanced customization). @@ -100,8 +99,6 @@ limitations under the License. // ... // nodeRegistration: // ... -// localApiEndpoint: -// ... // // The InitConfiguration type should be used to configure runtime settings, that in case of kubeadm init // are the configuration of the bootstrap token and all the setting which are specific to the node where kubeadm @@ -157,7 +154,7 @@ limitations under the License. // deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. 
// // See https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ or https://godoc.org/k8s.io/kubelet/config/v1beta1#KubeletConfiguration -// for kube proxy official documentation. +// for kubelet official documentation. // // Here is a fully populated example of a single YAML file containing multiple // configuration types to be used during a `kubeadm init` run. @@ -171,9 +168,10 @@ limitations under the License. // - token: "783bde.3f89s0fje9f38fhf" // description: "another bootstrap token" // usages: +// - authentication // - signing // groups: -// - system:anonymous +// - system:bootstrappers:kubeadm:default-node-token // nodeRegistration: // name: "ec2-10-100-0-1" // criSocket: "/var/run/dockershim.sock" @@ -183,7 +181,7 @@ limitations under the License. // effect: "NoSchedule" // kubeletExtraArgs: // cgroupDriver: "cgroupfs" -// localApiEndpoint: +// localAPIEndpoint: // advertiseAddress: "10.100.0.1" // bindPort: 6443 // --- @@ -192,7 +190,8 @@ limitations under the License. // etcd: // # one of local or external // local: -// image: "k8s.gcr.io/etcd-amd64:3.2.18" +// imageRepository: "k8s.gcr.io" +// imageTag: "3.2.24" // dataDir: "/var/lib/etcd" // extraArgs: // listen-client-urls: "http://10.100.0.1:2379" @@ -200,54 +199,62 @@ limitations under the License. 
// - "ec2-10-100-0-1.compute-1.amazonaws.com" // peerCertSANs: // - "10.100.0.1" -// external: -// endpoints: -// - "10.100.0.1:2379" -// - "10.100.0.2:2379" -// caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" -// certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" -// certKey: "/etcd/kubernetes/pki/etcd/etcd.key" +// # external: +// # endpoints: +// # - "10.100.0.1:2379" +// # - "10.100.0.2:2379" +// # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" +// # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" +// # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key" // networking: // serviceSubnet: "10.96.0.0/12" // podSubnet: "10.100.0.1/24" // dnsDomain: "cluster.local" // kubernetesVersion: "v1.12.0" // controlPlaneEndpoint: "10.100.0.1:6443" -// apiServer: -// extraArgs: -// authorization-mode: "Node,RBAC" -// extraVolumes: -// - name: "some-volume" -// hostPath: "/etc/some-path" -// mountPath: "/etc/some-pod-path" -// readOnly: false -// pathType: File -// certSANs: -// - "10.100.1.1" -// - "ec2-10-100-0-1.compute-1.amazonaws.com" -// timeoutForControlPlane: 4m0s -// controllerManager: -// extraArgs: -// node-cidr-mask-size: 20 -// extraVolumes: -// - name: "some-volume" -// hostPath: "/etc/some-path" -// mountPath: "/etc/some-pod-path" -// readOnly: false -// pathType: File -// scheduler: -// extraArgs: -// address: "10.100.0.1" -// extraVolumes: -// - name: "some-volume" -// hostPath: "/etc/some-path" -// mountPath: "/etc/some-pod-path" -// readOnly: false -// pathType: File -// certificatesDir: "/etc/kubernetes/pki" -// imageRepository: "k8s.gcr.io" -// useHyperKubeImage: false -// clusterName: "example-cluster" +// apiServer: +// extraArgs: +// authorization-mode: "Node,RBAC" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// certSANs: +// - "10.100.1.1" +// - "ec2-10-100-0-1.compute-1.amazonaws.com" +// timeoutForControlPlane: 4m0s +// controllerManager: +// extraArgs: 
+// "node-cidr-mask-size": "20" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// scheduler: +// extraArgs: +// address: "10.100.0.1" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// certificatesDir: "/etc/kubernetes/pki" +// imageRepository: "k8s.gcr.io" +// useHyperKubeImage: false +// clusterName: "example-cluster" +// --- +// apiVersion: kubelet.config.k8s.io/v1beta1 +// kind: KubeletConfiguration +// # kubelet specific options here +// --- +// apiVersion: kubeproxy.config.k8s.io/v1alpha1 +// kind: KubeProxyConfiguration +// # kube-proxy specific options here // // Kubeadm join configuration types // diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go index c97566b6ec8..77f12dea1c2 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go @@ -399,7 +399,7 @@ func ValidateMixedArguments(flag *pflag.FlagSet) error { mixedInvalidFlags := []string{} flag.Visit(func(f *pflag.Flag) { - if f.Name == "config" || f.Name == "ignore-preflight-errors" || strings.HasPrefix(f.Name, "skip-") || f.Name == "dry-run" || f.Name == "kubeconfig" || f.Name == "v" || f.Name == "rootfs" || f.Name == "print-join-command" { + if f.Name == "config" || f.Name == "ignore-preflight-errors" || strings.HasPrefix(f.Name, "skip-") || f.Name == "dry-run" || f.Name == "kubeconfig" || f.Name == "v" || f.Name == "rootfs" || f.Name == "print-join-command" || f.Name == "node-name" || f.Name == "cri-socket" { // "--skip-*" flags or other whitelisted flags can be set with --config return } diff --git a/cmd/kubeadm/app/cmd/alpha/BUILD b/cmd/kubeadm/app/cmd/alpha/BUILD index 1e2b35048c7..2046387f563 100644 --- a/cmd/kubeadm/app/cmd/alpha/BUILD +++ 
b/cmd/kubeadm/app/cmd/alpha/BUILD @@ -63,9 +63,9 @@ go_test( deps = [ "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/phases/certs:go_default_library", + "//cmd/kubeadm/app/util/certs:go_default_library", "//cmd/kubeadm/app/util/pkiutil:go_default_library", "//cmd/kubeadm/test:go_default_library", - "//cmd/kubeadm/test/certs:go_default_library", "//cmd/kubeadm/test/cmd:go_default_library", "//cmd/kubeadm/test/kubeconfig:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", diff --git a/cmd/kubeadm/app/cmd/alpha/certs_test.go b/cmd/kubeadm/app/cmd/alpha/certs_test.go index 47798fd76cf..9e184a3509e 100644 --- a/cmd/kubeadm/app/cmd/alpha/certs_test.go +++ b/cmd/kubeadm/app/cmd/alpha/certs_test.go @@ -31,9 +31,9 @@ import ( "github.com/spf13/cobra" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + certstestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" testutil "k8s.io/kubernetes/cmd/kubeadm/test" - certstestutil "k8s.io/kubernetes/cmd/kubeadm/test/certs" cmdtestutil "k8s.io/kubernetes/cmd/kubeadm/test/cmd" ) diff --git a/cmd/kubeadm/app/cmd/config.go b/cmd/kubeadm/app/cmd/config.go index 5f4ff01b87d..e4545c9f5c2 100644 --- a/cmd/kubeadm/app/cmd/config.go +++ b/cmd/kubeadm/app/cmd/config.go @@ -85,7 +85,6 @@ func NewCmdConfig(out io.Writer) *cobra.Command { kubeConfigFile = cmdutil.FindExistingKubeConfig(kubeConfigFile) cmd.AddCommand(NewCmdConfigPrint(out)) - cmd.AddCommand(NewCmdConfigPrintDefault(out)) cmd.AddCommand(NewCmdConfigMigrate(out)) cmd.AddCommand(NewCmdConfigUpload(out, &kubeConfigFile)) cmd.AddCommand(NewCmdConfigView(out, &kubeConfigFile)) @@ -142,7 +141,7 @@ func runConfigPrintActionDefaults(out io.Writer, componentConfigs []string, conf allBytes := [][]byte{initialConfig} for _, componentConfig := range componentConfigs { - cfgBytes, err := 
getDefaultComponentConfigAPIObjectBytes(componentConfig) + cfgBytes, err := getDefaultComponentConfigBytes(componentConfig) kubeadmutil.CheckErr(err) allBytes = append(allBytes, cfgBytes) } @@ -150,68 +149,23 @@ func runConfigPrintActionDefaults(out io.Writer, componentConfigs []string, conf fmt.Fprint(out, string(bytes.Join(allBytes, []byte(constants.YAMLDocumentSeparator)))) } -// NewCmdConfigPrintDefault returns cobra.Command for "kubeadm config print-default" command -func NewCmdConfigPrintDefault(out io.Writer) *cobra.Command { - apiObjects := []string{} - cmd := &cobra.Command{ - Use: "print-default", - Aliases: []string{"print-defaults"}, - Short: "Print the default values for a kubeadm configuration object.", - Long: fmt.Sprintf(dedent.Dedent(` - This command prints objects such as the default InitConfiguration that is used for 'kubeadm init' and 'kubeadm upgrade', - and the default JoinConfiguration object that is used for 'kubeadm join'. - - For documentation visit: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3 - - Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like %q in order to pass validation but - not perform the real computation for creating a token. - `), placeholderToken), - Deprecated: "Please, use `kubeadm config print` instead.", - Run: func(cmd *cobra.Command, args []string) { - if len(apiObjects) == 0 { - apiObjects = getSupportedAPIObjects() - } - allBytes := [][]byte{} - for _, apiObject := range apiObjects { - cfgBytes, err := getDefaultAPIObjectBytes(apiObject) - kubeadmutil.CheckErr(err) - allBytes = append(allBytes, cfgBytes) - } - fmt.Fprint(out, string(bytes.Join(allBytes, []byte(constants.YAMLDocumentSeparator)))) - }, - } - cmd.Flags().StringSliceVar(&apiObjects, "api-objects", apiObjects, - fmt.Sprintf("A comma-separated list for API objects to print the default values for. Available values: %v. 
This flag unset means 'print all known objects'", getAllAPIObjectNames())) - return cmd -} - -func getDefaultComponentConfigAPIObjectBytes(apiObject string) ([]byte, error) { +func getDefaultComponentConfigBytes(apiObject string) ([]byte, error) { registration, ok := componentconfigs.Known[componentconfigs.RegistrationKind(apiObject)] if !ok { return []byte{}, errors.Errorf("--component-configs needs to contain some of %v", getSupportedComponentConfigAPIObjects()) } - return getDefaultComponentConfigBytes(registration) -} -func getDefaultAPIObjectBytes(apiObject string) ([]byte, error) { - switch apiObject { - case constants.InitConfigurationKind: - return getDefaultInitConfigBytesByKind(constants.InitConfigurationKind) - - case constants.ClusterConfigurationKind: - return getDefaultInitConfigBytesByKind(constants.ClusterConfigurationKind) - - case constants.JoinConfigurationKind: - return getDefaultNodeConfigBytes() - - default: - // Is this a component config? - registration, ok := componentconfigs.Known[componentconfigs.RegistrationKind(apiObject)] - if !ok { - return []byte{}, errors.Errorf("--api-object needs to be one of %v", getAllAPIObjectNames()) - } - return getDefaultComponentConfigBytes(registration) + defaultedInitConfig, err := getDefaultedInitConfig() + if err != nil { + return []byte{}, err } + + realObj, ok := registration.GetFromInternalConfig(&defaultedInitConfig.ClusterConfiguration) + if !ok { + return []byte{}, errors.New("GetFromInternalConfig failed") + } + + return registration.Marshal(realObj) } // getSupportedComponentConfigAPIObjects returns all currently supported component config API object names @@ -223,23 +177,6 @@ func getSupportedComponentConfigAPIObjects() []string { return objects } -// getSupportedAPIObjects returns all currently supported API object names -func getSupportedAPIObjects() []string { - baseObjects := []string{constants.InitConfigurationKind, constants.ClusterConfigurationKind, constants.JoinConfigurationKind} - 
objects := getSupportedComponentConfigAPIObjects() - objects = append(objects, baseObjects...) - return objects -} - -// getAllAPIObjectNames returns currently supported API object names and their historical aliases -// NB. currently there is no historical supported API objects, but we keep this function for future changes -func getAllAPIObjectNames() []string { - historicAPIObjectAliases := []string{} - objects := getSupportedAPIObjects() - objects = append(objects, historicAPIObjectAliases...) - return objects -} - func getDefaultedInitConfig() (*kubeadmapi.InitConfiguration, error) { return configutil.ConfigFileAndDefaultsToInternalConfig("", &kubeadmapiv1beta1.InitConfiguration{ // TODO: Probably move to getDefaultedClusterConfig? @@ -260,18 +197,6 @@ func getDefaultInitConfigBytes() ([]byte, error) { return configutil.MarshalKubeadmConfigObject(internalcfg) } -func getDefaultInitConfigBytesByKind(kind string) ([]byte, error) { - b, err := getDefaultInitConfigBytes() - if err != nil { - return []byte{}, err - } - gvkmap, err := kubeadmutil.SplitYAMLDocuments(b) - if err != nil { - return []byte{}, err - } - return gvkmap[kubeadmapiv1beta1.SchemeGroupVersion.WithKind(kind)], nil -} - func getDefaultNodeConfigBytes() ([]byte, error) { internalcfg, err := configutil.JoinConfigFileAndDefaultsToInternalConfig("", &kubeadmapiv1beta1.JoinConfiguration{ Discovery: kubeadmapiv1beta1.Discovery{ @@ -289,20 +214,6 @@ func getDefaultNodeConfigBytes() ([]byte, error) { return configutil.MarshalKubeadmConfigObject(internalcfg) } -func getDefaultComponentConfigBytes(registration componentconfigs.Registration) ([]byte, error) { - defaultedInitConfig, err := getDefaultedInitConfig() - if err != nil { - return []byte{}, err - } - - realobj, ok := registration.GetFromInternalConfig(&defaultedInitConfig.ClusterConfiguration) - if !ok { - return []byte{}, errors.New("GetFromInternalConfig failed") - } - - return registration.Marshal(realobj) -} - // NewCmdConfigMigrate returns 
cobra.Command for "kubeadm config migrate" command func NewCmdConfigMigrate(out io.Writer) *cobra.Command { var oldCfgPath, newCfgPath string @@ -329,10 +240,7 @@ func NewCmdConfigMigrate(out io.Writer) *cobra.Command { kubeadmutil.CheckErr(errors.New("The --old-config flag is mandatory")) } - internalcfg, err := configutil.AnyConfigFileAndDefaultsToInternal(oldCfgPath) - kubeadmutil.CheckErr(err) - - outputBytes, err := configutil.MarshalKubeadmConfigObject(internalcfg) + outputBytes, err := configutil.MigrateOldConfigFromFile(oldCfgPath) kubeadmutil.CheckErr(err) if newCfgPath == "" { diff --git a/cmd/kubeadm/app/cmd/config_test.go b/cmd/kubeadm/app/cmd/config_test.go index 3c0cdf38aca..a9bffee2b71 100644 --- a/cmd/kubeadm/app/cmd/config_test.go +++ b/cmd/kubeadm/app/cmd/config_test.go @@ -43,7 +43,7 @@ const ( defaultNumberOfImages = 8 // dummyKubernetesVersion is just used for unit testing, in order to not make // kubeadm lookup dl.k8s.io to resolve what the latest stable release is - dummyKubernetesVersion = "v1.11.0" + dummyKubernetesVersion = "v1.12.0" ) func TestNewCmdConfigImagesList(t *testing.T) { @@ -69,12 +69,12 @@ func TestImagesListRunWithCustomConfigPath(t *testing.T) { name: "set k8s version", expectedImageCount: defaultNumberOfImages, expectedImageSubstrings: []string{ - ":v1.11.1", + ":v1.12.1", }, configContents: []byte(dedent.Dedent(` apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration - kubernetesVersion: v1.11.1 + kubernetesVersion: v1.12.1 `)), }, { @@ -86,7 +86,7 @@ func TestImagesListRunWithCustomConfigPath(t *testing.T) { configContents: []byte(dedent.Dedent(` apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration - kubernetesVersion: v1.11.0 + kubernetesVersion: v1.12.0 `)), }, } diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 4e779f2f2b2..22939087d9a 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -305,6 +305,15 @@ func newInitData(cmd *cobra.Command, options 
*initOptions, out io.Writer) (initD if err != nil { return initData{}, err } + + // override node name and CRI socket from the command line options + if options.externalcfg.NodeRegistration.Name != "" { + cfg.NodeRegistration.Name = options.externalcfg.NodeRegistration.Name + } + if options.externalcfg.NodeRegistration.CRISocket != kubeadmapiv1beta1.DefaultCRISocket { + cfg.NodeRegistration.CRISocket = options.externalcfg.NodeRegistration.CRISocket + } + if err := configutil.VerifyAPIServerBindAddress(cfg.LocalAPIEndpoint.AdvertiseAddress); err != nil { return initData{}, err } diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index cce58a1f337..54548e66b36 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -307,7 +307,16 @@ func NewJoin(cfgPath string, defaultcfg *kubeadmapiv1beta1.JoinConfiguration, ig if err != nil { return nil, err } - if defaultcfg.ControlPlane != nil { + + // override node name and CRI socket from the command line options + if defaultcfg.NodeRegistration.Name != "" { + internalCfg.NodeRegistration.Name = defaultcfg.NodeRegistration.Name + } + if defaultcfg.NodeRegistration.CRISocket != kubeadmapiv1beta1.DefaultCRISocket { + internalCfg.NodeRegistration.CRISocket = defaultcfg.NodeRegistration.CRISocket + } + + if internalCfg.ControlPlane != nil { if err := configutil.VerifyAPIServerBindAddress(internalCfg.ControlPlane.LocalAPIEndpoint.AdvertiseAddress); err != nil { return nil, err } diff --git a/cmd/kubeadm/app/cmd/options/constant.go b/cmd/kubeadm/app/cmd/options/constant.go index acb51ecd554..f966750e05f 100644 --- a/cmd/kubeadm/app/cmd/options/constant.go +++ b/cmd/kubeadm/app/cmd/options/constant.go @@ -31,7 +31,7 @@ const APIServerExtraArgs = "apiserver-extra-args" // CertificatesDir flag sets the path where to save and read the certificates. const CertificatesDir = "cert-dir" -// CfgPath flag sets the path to kubeadm config file. WARNING: Usage of a configuration file is experimental. 
+// CfgPath flag sets the path to kubeadm config file. const CfgPath = "config" // ControllerManagerExtraArgs flag sets extra flags to pass to the Controller Manager or override default ones in form of =. diff --git a/cmd/kubeadm/app/cmd/options/generic.go b/cmd/kubeadm/app/cmd/options/generic.go index 3a08838b099..16f8f0c7aae 100644 --- a/cmd/kubeadm/app/cmd/options/generic.go +++ b/cmd/kubeadm/app/cmd/options/generic.go @@ -33,7 +33,7 @@ func AddKubeConfigDirFlag(fs *pflag.FlagSet, kubeConfigDir *string) { // AddConfigFlag adds the --config flag to the given flagset func AddConfigFlag(fs *pflag.FlagSet, cfgPath *string) { - fs.StringVar(cfgPath, CfgPath, *cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental).") + fs.StringVar(cfgPath, CfgPath, *cfgPath, "Path to a kubeadm configuration file.") } // AddIgnorePreflightErrorsFlag adds the --ignore-preflight-errors flag to the given flagset diff --git a/cmd/kubeadm/app/cmd/phases/BUILD b/cmd/kubeadm/app/cmd/phases/BUILD index 932f6b732f7..ca17f2dc792 100644 --- a/cmd/kubeadm/app/cmd/phases/BUILD +++ b/cmd/kubeadm/app/cmd/phases/BUILD @@ -22,7 +22,6 @@ go_library( "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library", - "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", "//cmd/kubeadm/app/cmd/options:go_default_library", "//cmd/kubeadm/app/cmd/phases/workflow:go_default_library", "//cmd/kubeadm/app/cmd/util:go_default_library", @@ -42,8 +41,8 @@ go_library( "//cmd/kubeadm/app/preflight:go_default_library", "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", - "//cmd/kubeadm/app/util/config:go_default_library", "//cmd/kubeadm/app/util/dryrun:go_default_library", + "//cmd/kubeadm/app/util/pkiutil:go_default_library", "//pkg/util/normalizer:go_default_library", "//pkg/version:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -51,7 +50,6 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/renstrom/dedent:go_default_library", - "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", @@ -70,9 +68,11 @@ go_test( "//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library", "//cmd/kubeadm/app/cmd/phases/workflow:go_default_library", "//cmd/kubeadm/app/phases/certs:go_default_library", + "//cmd/kubeadm/app/util/certs:go_default_library", "//cmd/kubeadm/app/util/pkiutil:go_default_library", "//cmd/kubeadm/test:go_default_library", "//pkg/version:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", ], ) diff --git a/cmd/kubeadm/app/cmd/phases/bootstraptoken.go b/cmd/kubeadm/app/cmd/phases/bootstraptoken.go index b6492960f8b..4bbbb96bd73 100644 --- a/cmd/kubeadm/app/cmd/phases/bootstraptoken.go +++ b/cmd/kubeadm/app/cmd/phases/bootstraptoken.go @@ -18,7 +18,6 @@ package phases import ( "fmt" - "path/filepath" "github.com/pkg/errors" @@ -26,7 +25,6 @@ import ( kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" clusterinfophase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo" nodebootstraptokenphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node" "k8s.io/kubernetes/pkg/util/normalizer" @@ -51,7 +49,7 @@ var ( type bootstrapTokenData interface { Cfg() *kubeadmapi.InitConfiguration Client() (clientset.Interface, error) - KubeConfigDir() string + KubeConfigPath() string SkipTokenPrint() bool Tokens() []string } @@ -66,7 +64,7 @@ func 
NewBootstrapTokenPhase() workflow.Phase { Long: bootstrapTokenLongDesc, InheritFlags: []string{ options.CfgPath, - options.KubeconfigDir, + options.KubeconfigPath, options.SkipTokenPrint, }, Run: runBoostrapToken, @@ -113,8 +111,7 @@ func runBoostrapToken(c workflow.RunData) error { } // Create the cluster-info ConfigMap with the associated RBAC rules - adminKubeConfigPath := filepath.Join(data.KubeConfigDir(), kubeadmconstants.AdminKubeConfigFileName) - if err := clusterinfophase.CreateBootstrapConfigMapIfNotExists(client, adminKubeConfigPath); err != nil { + if err := clusterinfophase.CreateBootstrapConfigMapIfNotExists(client, data.KubeConfigPath()); err != nil { return errors.Wrap(err, "error creating bootstrap ConfigMap") } if err := clusterinfophase.CreateClusterInfoRBACRules(client); err != nil { diff --git a/cmd/kubeadm/app/cmd/phases/certs.go b/cmd/kubeadm/app/cmd/phases/certs.go index 778cd6c3689..cf82a5be68b 100644 --- a/cmd/kubeadm/app/cmd/phases/certs.go +++ b/cmd/kubeadm/app/cmd/phases/certs.go @@ -31,6 +31,7 @@ import ( kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" + "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" "k8s.io/kubernetes/pkg/util/normalizer" ) @@ -228,9 +229,13 @@ func runCAPhase(ca *certsphase.KubeadmCert) func(c workflow.RunData) error { return errors.New("certs phase invoked with an invalid data struct") } - // if external CA mode, skips certificate authority generation - if data.ExternalCA() { - fmt.Printf("[certs] External CA mode: Using existing %s certificate authority\n", ca.BaseName) + // TODO(EKF): can we avoid loading these certificates every time? 
+ if _, err := pkiutil.TryLoadCertFromDisk(data.CertificateDir(), ca.BaseName); err == nil { + if _, err := pkiutil.TryLoadKeyFromDisk(data.CertificateDir(), ca.BaseName); err == nil { + fmt.Printf("[certs] Using existing %s certificate authority\n", ca.BaseName) + return nil + } + fmt.Printf("[certs] Using existing %s keyless certificate authority", ca.BaseName) return nil } @@ -257,9 +262,18 @@ func runCertPhase(cert *certsphase.KubeadmCert, caCert *certsphase.KubeadmCert) return errors.New("certs phase invoked with an invalid data struct") } - // if external CA mode, skip certificate generation - if data.ExternalCA() { - fmt.Printf("[certs] External CA mode: Using existing %s certificate\n", cert.BaseName) + // TODO(EKF): can we avoid loading these certificates every time? + if certData, _, err := pkiutil.TryLoadCertAndKeyFromDisk(data.CertificateDir(), cert.BaseName); err == nil { + caCertData, err := pkiutil.TryLoadCertFromDisk(data.CertificateDir(), caCert.BaseName) + if err != nil { + return errors.Wrapf(err, "couldn't load CA certificate %s", caCert.Name) + } + + if err := certData.CheckSignatureFrom(caCertData); err != nil { + return errors.Wrapf(err, "[certs] certificate %s not signed by CA certificate %s", cert.BaseName, caCert.BaseName) + } + + fmt.Printf("[certs] Using existing %s certificate and key on disk\n", cert.BaseName) return nil } diff --git a/cmd/kubeadm/app/cmd/phases/certs_test.go b/cmd/kubeadm/app/cmd/phases/certs_test.go index 06fc61ce63f..f72208c5f31 100644 --- a/cmd/kubeadm/app/cmd/phases/certs_test.go +++ b/cmd/kubeadm/app/cmd/phases/certs_test.go @@ -20,9 +20,11 @@ import ( "os" "testing" + "github.com/spf13/cobra" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + certstestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" testutil "k8s.io/kubernetes/cmd/kubeadm/test" ) 
@@ -51,6 +53,9 @@ func TestCertsWithCSRs(t *testing.T) { // global vars csrOnly = true csrDir = certDir + defer func() { + csrOnly = false + }() phase := NewCertsPhase() // find the api cert phase @@ -75,3 +80,28 @@ func TestCertsWithCSRs(t *testing.T) { t.Fatalf("couldn't load certificate %q: %v", cert.BaseName, err) } } + +func TestCreateSparseCerts(t *testing.T) { + for _, test := range certstestutil.GetSparseCertTestCases(t) { + t.Run(test.Name, func(t *testing.T) { + tmpdir := testutil.SetupTempDir(t) + defer os.RemoveAll(tmpdir) + + certstestutil.WritePKIFiles(t, tmpdir, test.Files) + + r := workflow.NewRunner() + r.AppendPhase(NewCertsPhase()) + r.SetDataInitializer(func(*cobra.Command) (workflow.RunData, error) { + certsData := &testCertsData{ + cfg: testutil.GetDefaultInternalConfig(t), + } + certsData.cfg.CertificatesDir = tmpdir + return certsData, nil + }) + + if err := r.Run(); (err != nil) != test.ExpectError { + t.Fatalf("expected error to be %t, got %t (%v)", test.ExpectError, (err != nil), err) + } + }) + } +} diff --git a/cmd/kubeadm/app/cmd/phases/markcontrolplane.go b/cmd/kubeadm/app/cmd/phases/markcontrolplane.go index eac8a1ed4f9..37dd80564d3 100644 --- a/cmd/kubeadm/app/cmd/phases/markcontrolplane.go +++ b/cmd/kubeadm/app/cmd/phases/markcontrolplane.go @@ -50,6 +50,7 @@ func NewMarkControlPlanePhase() workflow.Phase { Example: markControlPlaneExample, InheritFlags: []string{ options.NodeName, + options.CfgPath, }, Run: runMarkControlPlane, } diff --git a/cmd/kubeadm/app/cmd/phases/uploadconfig.go b/cmd/kubeadm/app/cmd/phases/uploadconfig.go index c11844ad81a..0db494d66e7 100644 --- a/cmd/kubeadm/app/cmd/phases/uploadconfig.go +++ b/cmd/kubeadm/app/cmd/phases/uploadconfig.go @@ -75,6 +75,7 @@ func NewUploadConfigPhase() workflow.Phase { Name: "all", Short: "Uploads all configuration to a config map", RunAllSiblings: true, + InheritFlags: getUploadConfigPhaseFlags(), }, { Name: "kubeadm", diff --git a/cmd/kubeadm/app/cmd/phases/util.go 
b/cmd/kubeadm/app/cmd/phases/util.go index 4a4d280cefb..c0a2c2f6492 100644 --- a/cmd/kubeadm/app/cmd/phases/util.go +++ b/cmd/kubeadm/app/cmd/phases/util.go @@ -17,50 +17,10 @@ limitations under the License. package phases import ( - "github.com/spf13/cobra" - - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" - kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" - configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" "k8s.io/kubernetes/pkg/version" ) -// runCmdPhase creates a cobra.Command Run function, by composing the call to the given cmdFunc with necessary additional steps (e.g preparation of input parameters) -func runCmdPhase(cmdFunc func(outDir string, cfg *kubeadmapi.InitConfiguration) error, outDir, cfgPath *string, cfg *kubeadmapiv1beta1.InitConfiguration, defaultKubernetesVersion string) func(cmd *cobra.Command, args []string) { - - // the following statement build a closure that wraps a call to a cmdFunc, binding - // the function itself with the specific parameters of each sub command. - // Please note that specific parameter should be passed as value, while other parameters - passed as reference - - // are shared between sub commands and gets access to current value e.g. flags value. - - return func(cmd *cobra.Command, args []string) { - if err := validation.ValidateMixedArguments(cmd.Flags()); err != nil { - kubeadmutil.CheckErr(err) - } - - // This is used for unit testing only... 
- // If we wouldn't set this to something, the code would dynamically look up the version from the internet - // By setting this explicitly for tests workarounds that - if defaultKubernetesVersion != "" { - cfg.KubernetesVersion = defaultKubernetesVersion - } else { - // KubernetesVersion is not used, but we set it explicitly to avoid the lookup - // of the version from the internet when executing ConfigFileAndDefaultsToInternalConfig - SetKubernetesVersion(cfg) - } - - // This call returns the ready-to-use configuration based on the configuration file that might or might not exist and the default cfg populated by flags - internalcfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(*cfgPath, cfg) - kubeadmutil.CheckErr(err) - - // Execute the cmdFunc - err = cmdFunc(*outDir, internalcfg) - kubeadmutil.CheckErr(err) - } -} - // SetKubernetesVersion gets the current Kubeadm version and sets it as KubeadmVersion in the config, // unless it's already set to a value different from the default. 
func SetKubernetesVersion(cfg *kubeadmapiv1beta1.InitConfiguration) { diff --git a/cmd/kubeadm/app/cmd/reset.go b/cmd/kubeadm/app/cmd/reset.go index 26599c8c385..f181011eadf 100644 --- a/cmd/kubeadm/app/cmd/reset.go +++ b/cmd/kubeadm/app/cmd/reset.go @@ -156,7 +156,7 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface) error { // Try to unmount mounted directories under kubeadmconstants.KubeletRunDirectory in order to be able to remove the kubeadmconstants.KubeletRunDirectory directory later fmt.Printf("[reset] unmounting mounted directories in %q\n", kubeadmconstants.KubeletRunDirectory) - umountDirsCmd := fmt.Sprintf("awk '$2 ~ path {print $2}' path=%s /proc/mounts | xargs -r umount", kubeadmconstants.KubeletRunDirectory) + umountDirsCmd := fmt.Sprintf("awk '$2 ~ path {print $2}' path=%s/ /proc/mounts | xargs -r umount", kubeadmconstants.KubeletRunDirectory) klog.V(1).Infof("[reset] executing command %q", umountDirsCmd) umountOutputBytes, err := exec.Command("sh", "-c", umountDirsCmd).Output() @@ -207,7 +207,7 @@ func getEtcdDataDir(manifestPath string, client clientset.Interface) (string, er if client != nil { cfg, err := configutil.FetchConfigFromFileOrCluster(client, os.Stdout, "reset", "", false) - if err == nil { + if err == nil && cfg.Etcd.Local != nil { return cfg.Etcd.Local.DataDir, nil } klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap, using etcd pod spec as fallback: %v", err) diff --git a/cmd/kubeadm/app/cmd/token_test.go b/cmd/kubeadm/app/cmd/token_test.go index b5ca50b5652..8de0595a6c9 100644 --- a/cmd/kubeadm/app/cmd/token_test.go +++ b/cmd/kubeadm/app/cmd/token_test.go @@ -175,7 +175,7 @@ func TestRunCreateToken(t *testing.T) { ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{ // KubernetesVersion is not used, but we set this explicitly to avoid // the lookup of the version from the internet when executing ConfigFileAndDefaultsToInternalConfig - KubernetesVersion: "v1.11.0", + KubernetesVersion: 
"v1.12.0", }, BootstrapTokens: []kubeadmapiv1beta1.BootstrapToken{ { diff --git a/cmd/kubeadm/app/cmd/upgrade/node.go b/cmd/kubeadm/app/cmd/upgrade/node.go index 9e3c99a097c..a915d2c9134 100644 --- a/cmd/kubeadm/app/cmd/upgrade/node.go +++ b/cmd/kubeadm/app/cmd/upgrade/node.go @@ -48,11 +48,11 @@ var ( upgradeNodeConfigExample = normalizer.Examples(` # Downloads the kubelet configuration from the ConfigMap in the cluster. Uses a specific desired kubelet version. - kubeadm upgrade node config --kubelet-version v1.12.0 + kubeadm upgrade node config --kubelet-version v1.13.0 # Simulates the downloading of the kubelet configuration from the ConfigMap in the cluster with a specific desired # version. Does not change any state locally on the node. - kubeadm upgrade node config --kubelet-version v1.12.0 --dry-run + kubeadm upgrade node config --kubelet-version v1.13.0 --dry-run `) ) diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 89f07d26df7..c08c7fb3e38 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -26,7 +26,6 @@ import ( "time" "github.com/pkg/errors" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/version" bootstrapapi "k8s.io/cluster-bootstrap/token/api" @@ -251,7 +250,7 @@ const ( MinExternalEtcdVersion = "3.2.18" // DefaultEtcdVersion indicates the default etcd version that kubeadm uses - DefaultEtcdVersion = "3.2.24" + DefaultEtcdVersion = "3.3.10" // PauseVersion indicates the default pause image version for kubeadm PauseVersion = "3.1" @@ -378,16 +377,18 @@ var ( MasterComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler} // MinimumControlPlaneVersion specifies the minimum control plane version kubeadm can deploy - MinimumControlPlaneVersion = version.MustParseSemantic("v1.11.0") + MinimumControlPlaneVersion = version.MustParseSemantic("v1.12.0") // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports 
- MinimumKubeletVersion = version.MustParseSemantic("v1.11.0") + MinimumKubeletVersion = version.MustParseSemantic("v1.12.0") // SupportedEtcdVersion lists officially supported etcd versions with corresponding Kubernetes releases SupportedEtcdVersion = map[uint8]string{ 10: "3.1.12", 11: "3.2.18", 12: "3.2.24", + 13: "3.2.24", + 14: "3.3.10", } ) diff --git a/cmd/kubeadm/app/constants/constants_test.go b/cmd/kubeadm/app/constants/constants_test.go index bbcab17240c..daffc118318 100644 --- a/cmd/kubeadm/app/constants/constants_test.go +++ b/cmd/kubeadm/app/constants/constants_test.go @@ -155,11 +155,6 @@ func TestEtcdSupportedVersion(t *testing.T) { expectedVersion: nil, expectedError: errors.New("Unsupported or unknown Kubernetes version(1.99.0)"), }, - { - kubernetesVersion: "1.10.0", - expectedVersion: version.MustParseSemantic("3.1.12"), - expectedError: nil, - }, { kubernetesVersion: "1.10.2", expectedVersion: version.MustParseSemantic("3.1.12"), @@ -175,6 +170,16 @@ func TestEtcdSupportedVersion(t *testing.T) { expectedVersion: version.MustParseSemantic("3.2.24"), expectedError: nil, }, + { + kubernetesVersion: "1.13.1", + expectedVersion: version.MustParseSemantic("3.2.24"), + expectedError: nil, + }, + { + kubernetesVersion: "1.14.0", + expectedVersion: version.MustParseSemantic("3.3.10"), + expectedError: nil, + }, } for _, rt := range tests { actualVersion, actualError := EtcdSupportedVersion(rt.kubernetesVersion) diff --git a/cmd/kubeadm/app/features/features.go b/cmd/kubeadm/app/features/features.go index 5f0eb350dd7..46139b1812c 100644 --- a/cmd/kubeadm/app/features/features.go +++ b/cmd/kubeadm/app/features/features.go @@ -31,9 +31,6 @@ const ( // CoreDNS is GA in v1.11 CoreDNS = "CoreDNS" - - // DynamicKubeletConfig is beta in v1.11 - DynamicKubeletConfig = "DynamicKubeletConfig" ) var coreDNSMessage = "featureGates:CoreDNS has been removed in v1.13\n" + diff --git a/cmd/kubeadm/app/features/features_test.go b/cmd/kubeadm/app/features/features_test.go 
index c8af0e6d05f..e0679eabc93 100644 --- a/cmd/kubeadm/app/features/features_test.go +++ b/cmd/kubeadm/app/features/features_test.go @@ -24,7 +24,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" ) -var TestMinVersion = version.MustParseSemantic("v1.11.0-alpha.1") +var TestMinVersion = version.MustParseSemantic("v1.12.0-alpha.1") func TestKnownFeatures(t *testing.T) { var someFeatures = FeatureList{ @@ -144,12 +144,12 @@ func TestValidateVersion(t *testing.T) { }, { //min version but correct value given requestedFeatures: map[string]bool{"feature2": true}, - requestedVersion: "v1.11.0", + requestedVersion: "v1.12.0", expectedError: false, }, { //min version and incorrect value given requestedFeatures: map[string]bool{"feature2": true}, - requestedVersion: "v1.10.2", + requestedVersion: "v1.11.2", expectedError: true, }, } diff --git a/cmd/kubeadm/app/phases/addons/proxy/manifests.go b/cmd/kubeadm/app/phases/addons/proxy/manifests.go index 93a3bf1dc6f..c47e6a7eaae 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/manifests.go +++ b/cmd/kubeadm/app/phases/addons/proxy/manifests.go @@ -80,6 +80,7 @@ spec: command: - /usr/local/bin/kube-proxy - --config=/var/lib/kube-proxy/{{ .ProxyConfigMapKey }} + - --hostname-override=$(NODE_NAME) securityContext: privileged: true volumeMounts: @@ -91,6 +92,11 @@ spec: - mountPath: /lib/modules name: lib-modules readOnly: true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName hostNetwork: true serviceAccountName: kube-proxy volumes: diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go b/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go index 9bfe210d1a5..2f3dc32d16c 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go @@ -183,7 +183,7 @@ func TestEnsureProxyAddon(t *testing.T) { PodSubnet: "5.6.7.8/24", }, ImageRepository: "someRepo", - KubernetesVersion: "v1.11.0", + KubernetesVersion: "v1.12.0", }, } diff --git 
a/cmd/kubeadm/app/phases/certs/BUILD b/cmd/kubeadm/app/phases/certs/BUILD index a138ced2e8a..c8a637aa156 100644 --- a/cmd/kubeadm/app/phases/certs/BUILD +++ b/cmd/kubeadm/app/phases/certs/BUILD @@ -16,9 +16,9 @@ go_test( deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", + "//cmd/kubeadm/app/util/certs:go_default_library", "//cmd/kubeadm/app/util/pkiutil:go_default_library", "//cmd/kubeadm/test:go_default_library", - "//cmd/kubeadm/test/certs:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/cmd/kubeadm/app/phases/certs/certs_test.go b/cmd/kubeadm/app/phases/certs/certs_test.go index cd5561b0efe..f83437de1f4 100644 --- a/cmd/kubeadm/app/phases/certs/certs_test.go +++ b/cmd/kubeadm/app/phases/certs/certs_test.go @@ -32,32 +32,11 @@ import ( certutil "k8s.io/client-go/util/cert" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" + certstestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" testutil "k8s.io/kubernetes/cmd/kubeadm/test" - certstestutil "k8s.io/kubernetes/cmd/kubeadm/test/certs" ) -func createCACert(t *testing.T) (*x509.Certificate, *rsa.PrivateKey) { - certCfg := &certutil.Config{CommonName: "kubernetes"} - cert, key, err := NewCACertAndKey(certCfg) - if err != nil { - t.Fatalf("couldn't create CA: %v", err) - } - return cert, key -} - -func createTestCert(t *testing.T, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey) { - cert, key, err := pkiutil.NewCertAndKey(caCert, caKey, - &certutil.Config{ - CommonName: "testCert", - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - }) - if err != nil { - t.Fatalf("couldn't create test cert: %v", err) - } - return cert, key -} - func 
createTestCSR(t *testing.T) (*x509.CertificateRequest, *rsa.PrivateKey) { csr, key, err := pkiutil.NewCSRAndKey( &certutil.Config{ @@ -71,8 +50,8 @@ func createTestCSR(t *testing.T) (*x509.CertificateRequest, *rsa.PrivateKey) { } func TestWriteCertificateAuthorithyFilesIfNotExist(t *testing.T) { - setupCert, setupKey := createCACert(t) - caCert, caKey := createCACert(t) + setupCert, setupKey := certstestutil.CreateCACert(t) + caCert, caKey := certstestutil.CreateCACert(t) var tests = []struct { setupFunc func(pkiDir string) error @@ -97,7 +76,7 @@ func TestWriteCertificateAuthorithyFilesIfNotExist(t *testing.T) { }, { // cert exists, but it is not a ca > err setupFunc: func(pkiDir string) error { - cert, key := createTestCert(t, setupCert, setupKey) + cert, key := certstestutil.CreateTestCert(t, setupCert, setupKey) return writeCertificateFilesIfNotExist(pkiDir, "dummy", setupCert, cert, key) }, expectedError: true, @@ -147,9 +126,9 @@ func TestWriteCertificateAuthorithyFilesIfNotExist(t *testing.T) { func TestWriteCertificateFilesIfNotExist(t *testing.T) { - caCert, caKey := createCACert(t) - setupCert, setupKey := createTestCert(t, caCert, caKey) - cert, key := createTestCert(t, caCert, caKey) + caCert, caKey := certstestutil.CreateCACert(t) + setupCert, setupKey := certstestutil.CreateTestCert(t, caCert, caKey) + cert, key := certstestutil.CreateTestCert(t, caCert, caKey) var tests = []struct { setupFunc func(pkiDir string) error @@ -174,8 +153,8 @@ func TestWriteCertificateFilesIfNotExist(t *testing.T) { }, { // cert exists, is signed by another ca > err setupFunc: func(pkiDir string) error { - anotherCaCert, anotherCaKey := createCACert(t) - anotherCert, anotherKey := createTestCert(t, anotherCaCert, anotherCaKey) + anotherCaCert, anotherCaKey := certstestutil.CreateCACert(t) + anotherCert, anotherKey := certstestutil.CreateTestCert(t, anotherCaCert, anotherCaKey) return writeCertificateFilesIfNotExist(pkiDir, "dummy", anotherCaCert, anotherCert, anotherKey) 
}, @@ -375,18 +354,18 @@ func TestNewCACertAndKey(t *testing.T) { } func TestSharedCertificateExists(t *testing.T) { - caCert, caKey := createCACert(t) - _, key := createTestCert(t, caCert, caKey) + caCert, caKey := certstestutil.CreateCACert(t) + _, key := certstestutil.CreateTestCert(t, caCert, caKey) publicKey := &key.PublicKey var tests = []struct { name string - files pkiFiles + files certstestutil.PKIFiles expectedError bool }{ { name: "success", - files: pkiFiles{ + files: certstestutil.PKIFiles{ "ca.crt": caCert, "ca.key": caKey, "front-proxy-ca.crt": caCert, @@ -399,7 +378,7 @@ func TestSharedCertificateExists(t *testing.T) { }, { name: "missing ca.crt", - files: pkiFiles{ + files: certstestutil.PKIFiles{ "ca.key": caKey, "front-proxy-ca.crt": caCert, "front-proxy-ca.key": caKey, @@ -412,7 +391,7 @@ func TestSharedCertificateExists(t *testing.T) { }, { name: "missing sa.key", - files: pkiFiles{ + files: certstestutil.PKIFiles{ "ca.crt": caCert, "ca.key": caKey, "front-proxy-ca.crt": caCert, @@ -425,7 +404,7 @@ func TestSharedCertificateExists(t *testing.T) { }, { name: "missing front-proxy.crt", - files: pkiFiles{ + files: certstestutil.PKIFiles{ "ca.crt": caCert, "ca.key": caKey, "front-proxy-ca.key": caKey, @@ -438,7 +417,7 @@ func TestSharedCertificateExists(t *testing.T) { }, { name: "missing etcd/ca.crt", - files: pkiFiles{ + files: certstestutil.PKIFiles{ "ca.crt": caCert, "ca.key": caKey, "front-proxy-ca.key": caKey, @@ -464,7 +443,7 @@ func TestSharedCertificateExists(t *testing.T) { } // created expected keys - writePKIFiles(t, tmpdir, test.files) + certstestutil.WritePKIFiles(t, tmpdir, test.files) // executes create func ret, err := SharedCertificateExists(cfg) @@ -482,80 +461,24 @@ func TestSharedCertificateExists(t *testing.T) { } func TestCreatePKIAssetsWithSparseCerts(t *testing.T) { - caCert, caKey := createCACert(t) - fpCACert, fpCAKey := createCACert(t) - etcdCACert, etcdCAKey := createCACert(t) - - fpCert, fpKey := createTestCert(t, 
fpCACert, fpCAKey) - - tests := []struct { - name string - files pkiFiles - expectError bool - }{ - { - name: "nothing present", - }, - { - name: "CAs already exist", - files: pkiFiles{ - "ca.crt": caCert, - "ca.key": caKey, - "front-proxy-ca.crt": fpCACert, - "front-proxy-ca.key": fpCAKey, - "etcd/ca.crt": etcdCACert, - "etcd/ca.key": etcdCAKey, - }, - }, - { - name: "CA certs only", - files: pkiFiles{ - "ca.crt": caCert, - "front-proxy-ca.crt": fpCACert, - "etcd/ca.crt": etcdCACert, - }, - expectError: true, - }, - { - name: "FrontProxyCA with certs", - files: pkiFiles{ - "ca.crt": caCert, - "ca.key": caKey, - "front-proxy-ca.crt": fpCACert, - "front-proxy-client.crt": fpCert, - "front-proxy-client.key": fpKey, - "etcd/ca.crt": etcdCACert, - "etcd/ca.key": etcdCAKey, - }, - }, - { - name: "FrontProxy certs missing CA", - files: pkiFiles{ - "front-proxy-client.crt": fpCert, - "front-proxy-client.key": fpKey, - }, - expectError: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + for _, test := range certstestutil.GetSparseCertTestCases(t) { + t.Run(test.Name, func(t *testing.T) { tmpdir := testutil.SetupTempDir(t) defer os.RemoveAll(tmpdir) cfg := testutil.GetDefaultInternalConfig(t) cfg.ClusterConfiguration.CertificatesDir = tmpdir - writePKIFiles(t, tmpdir, test.files) + certstestutil.WritePKIFiles(t, tmpdir, test.Files) err := CreatePKIAssets(cfg) if err != nil { - if test.expectError { + if test.ExpectError { return } t.Fatalf("Unexpected error: %v", err) } - if test.expectError { + if test.ExpectError { t.Fatal("Expected error from CreatePKIAssets, got none") } assertCertsExist(t, tmpdir) @@ -612,19 +535,19 @@ func TestUsingExternalCA(t *testing.T) { func TestValidateMethods(t *testing.T) { - caCert, caKey := createCACert(t) - cert, key := createTestCert(t, caCert, caKey) + caCert, caKey := certstestutil.CreateCACert(t) + cert, key := certstestutil.CreateTestCert(t, caCert, caKey) tests := []struct { name string - files 
pkiFiles + files certstestutil.PKIFiles validateFunc func(l certKeyLocation) error loc certKeyLocation expectedSuccess bool }{ { name: "validateCACert", - files: pkiFiles{ + files: certstestutil.PKIFiles{ "ca.crt": caCert, }, validateFunc: validateCACert, @@ -633,7 +556,7 @@ func TestValidateMethods(t *testing.T) { }, { name: "validateCACertAndKey (files present)", - files: pkiFiles{ + files: certstestutil.PKIFiles{ "ca.crt": caCert, "ca.key": caKey, }, @@ -642,7 +565,7 @@ func TestValidateMethods(t *testing.T) { expectedSuccess: true, }, { - files: pkiFiles{ + files: certstestutil.PKIFiles{ "ca.crt": caCert, }, name: "validateCACertAndKey (key missing)", @@ -652,7 +575,7 @@ func TestValidateMethods(t *testing.T) { }, { name: "validateSignedCert", - files: pkiFiles{ + files: certstestutil.PKIFiles{ "ca.crt": caCert, "ca.key": caKey, "apiserver.crt": cert, @@ -664,7 +587,7 @@ func TestValidateMethods(t *testing.T) { }, { name: "validatePrivatePublicKey", - files: pkiFiles{ + files: certstestutil.PKIFiles{ "sa.pub": &key.PublicKey, "sa.key": key, }, @@ -679,7 +602,7 @@ func TestValidateMethods(t *testing.T) { defer os.RemoveAll(dir) test.loc.pkiDir = dir - writePKIFiles(t, dir, test.files) + certstestutil.WritePKIFiles(t, dir, test.files) err := test.validateFunc(test.loc) if test.expectedSuccess && err != nil { @@ -722,31 +645,6 @@ func TestNewCSR(t *testing.T) { } } -type pkiFiles map[string]interface{} - -func writePKIFiles(t *testing.T, dir string, files pkiFiles) { - for filename, body := range files { - switch body := body.(type) { - case *x509.Certificate: - if err := certutil.WriteCert(path.Join(dir, filename), certutil.EncodeCertPEM(body)); err != nil { - t.Errorf("unable to write certificate to file %q: [%v]", dir, err) - } - case *rsa.PublicKey: - publicKeyBytes, err := certutil.EncodePublicKeyPEM(body) - if err != nil { - t.Errorf("unable to write public key to file %q: [%v]", filename, err) - } - if err := certutil.WriteKey(path.Join(dir, filename), 
publicKeyBytes); err != nil { - t.Errorf("unable to write public key to file %q: [%v]", filename, err) - } - case *rsa.PrivateKey: - if err := certutil.WriteKey(path.Join(dir, filename), certutil.EncodePrivateKeyPEM(body)); err != nil { - t.Errorf("unable to write private key to file %q: [%v]", filename, err) - } - } - } -} - func TestCreateCertificateFilesMethods(t *testing.T) { var tests = []struct { diff --git a/cmd/kubeadm/app/phases/certs/renewal/BUILD b/cmd/kubeadm/app/phases/certs/renewal/BUILD index 6172ca48370..98da00fd170 100644 --- a/cmd/kubeadm/app/phases/certs/renewal/BUILD +++ b/cmd/kubeadm/app/phases/certs/renewal/BUILD @@ -31,9 +31,9 @@ go_test( embed = [":go_default_library"], deps = [ "//cmd/kubeadm/app/phases/certs:go_default_library", + "//cmd/kubeadm/app/util/certs:go_default_library", "//cmd/kubeadm/app/util/pkiutil:go_default_library", "//cmd/kubeadm/test:go_default_library", - "//cmd/kubeadm/test/certs:go_default_library", "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go b/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go index 6b24c6f0606..199127862ce 100644 --- a/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go +++ b/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go @@ -33,9 +33,9 @@ import ( k8stesting "k8s.io/client-go/testing" certutil "k8s.io/client-go/util/cert" "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + certtestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" testutil "k8s.io/kubernetes/cmd/kubeadm/test" - certtestutil "k8s.io/kubernetes/cmd/kubeadm/test/certs" ) func TestRenewImplementations(t *testing.T) { diff --git a/cmd/kubeadm/app/phases/controlplane/manifests_test.go b/cmd/kubeadm/app/phases/controlplane/manifests_test.go index 
87f2847d252..fa45da80194 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests_test.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests_test.go @@ -597,7 +597,7 @@ func TestGetControllerManagerCommand(t *testing.T) { }, }, { - name: "custom cloudprovider for v1.12.0-beta.2", + name: "custom cluster-cidr for v1.12.0-beta.2", cfg: &kubeadmapi.ClusterConfiguration{ Networking: kubeadmapi.Networking{PodSubnet: "10.0.1.15/16"}, CertificatesDir: testCertsDir, @@ -700,7 +700,7 @@ func TestGetControllerManagerCommand(t *testing.T) { }, }, { - name: "custom cloudprovider for v1.11.3", + name: "custom cluster-cidr for v1.11.3", cfg: &kubeadmapi.ClusterConfiguration{ Networking: kubeadmapi.Networking{PodSubnet: "10.0.1.15/16"}, CertificatesDir: testCertsDir, diff --git a/cmd/kubeadm/app/phases/etcd/local.go b/cmd/kubeadm/app/phases/etcd/local.go index afe36b53702..c1b7eb565ad 100644 --- a/cmd/kubeadm/app/phases/etcd/local.go +++ b/cmd/kubeadm/app/phases/etcd/local.go @@ -18,6 +18,7 @@ package etcd import ( "fmt" + "os" "path/filepath" "strings" @@ -48,6 +49,12 @@ func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.In } // gets etcd StaticPodSpec emptyInitialCluster := []etcdutil.Member{} + + // creates target folder if not already exists + if err := os.MkdirAll(cfg.Etcd.Local.DataDir, 0700); err != nil { + return errors.Wrapf(err, "failed to create etcd directory %q", cfg.Etcd.Local.DataDir) + } + spec := GetEtcdPodSpec(cfg, emptyInitialCluster) // writes etcd StaticPod to disk if err := staticpodutil.WriteStaticPodToDisk(kubeadmconstants.Etcd, manifestDir, spec); err != nil { @@ -90,7 +97,7 @@ func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifest } // notifies the other members of the etcd cluster about the joining member - etcdPeerAddress := fmt.Sprintf("https://%s:%d", cfg.LocalAPIEndpoint.AdvertiseAddress, kubeadmconstants.EtcdListenPeerPort) + etcdPeerAddress := etcdutil.GetPeerURL(cfg) klog.V(1).Infof("Adding 
etcd member: %s", etcdPeerAddress) initialCluster, err := etcdClient.AddMember(cfg.NodeRegistration.Name, etcdPeerAddress) @@ -100,6 +107,11 @@ func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifest fmt.Println("[etcd] Announced new etcd member joining to the existing etcd cluster") klog.V(1).Infof("Updated etcd member list: %v", initialCluster) + // creates target folder if not already exists + if err := os.MkdirAll(cfg.Etcd.Local.DataDir, 0700); err != nil { + return errors.Wrapf(err, "failed to create etcd directory %q", cfg.Etcd.Local.DataDir) + } + klog.V(1).Info("Creating local etcd static pod manifest file") // gets etcd StaticPodSpec, actualized for the current InitConfiguration and the new list of etcd members spec := GetEtcdPodSpec(cfg, initialCluster) @@ -141,10 +153,10 @@ func GetEtcdPodSpec(cfg *kubeadmapi.InitConfiguration, initialCluster []etcdutil func getEtcdCommand(cfg *kubeadmapi.InitConfiguration, initialCluster []etcdutil.Member) []string { defaultArguments := map[string]string{ "name": cfg.GetNodeName(), - "listen-client-urls": fmt.Sprintf("https://127.0.0.1:%d,https://%s:%d", kubeadmconstants.EtcdListenClientPort, cfg.LocalAPIEndpoint.AdvertiseAddress, kubeadmconstants.EtcdListenClientPort), - "advertise-client-urls": fmt.Sprintf("https://%s:%d", cfg.LocalAPIEndpoint.AdvertiseAddress, kubeadmconstants.EtcdListenClientPort), - "listen-peer-urls": fmt.Sprintf("https://%s:%d", cfg.LocalAPIEndpoint.AdvertiseAddress, kubeadmconstants.EtcdListenPeerPort), - "initial-advertise-peer-urls": fmt.Sprintf("https://%s:%d", cfg.LocalAPIEndpoint.AdvertiseAddress, kubeadmconstants.EtcdListenPeerPort), + "listen-client-urls": fmt.Sprintf("%s,%s", etcdutil.GetClientURLByIP("127.0.0.1"), etcdutil.GetClientURL(cfg)), + "advertise-client-urls": etcdutil.GetClientURL(cfg), + "listen-peer-urls": etcdutil.GetPeerURL(cfg), + "initial-advertise-peer-urls": etcdutil.GetPeerURL(cfg), "data-dir": cfg.Etcd.Local.DataDir, "cert-file": 
filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerCertName), "key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerKeyName), @@ -158,7 +170,7 @@ func getEtcdCommand(cfg *kubeadmapi.InitConfiguration, initialCluster []etcdutil } if len(initialCluster) == 0 { - defaultArguments["initial-cluster"] = fmt.Sprintf("%s=https://%s:%d", cfg.GetNodeName(), cfg.LocalAPIEndpoint.AdvertiseAddress, kubeadmconstants.EtcdListenPeerPort) + defaultArguments["initial-cluster"] = fmt.Sprintf("%s=%s", cfg.GetNodeName(), etcdutil.GetPeerURL(cfg)) } else { // NB. the joining etcd instance should be part of the initialCluster list endpoints := []string{} diff --git a/cmd/kubeadm/app/phases/etcd/local_test.go b/cmd/kubeadm/app/phases/etcd/local_test.go index e08c4d583ab..8c2aa4ae30d 100644 --- a/cmd/kubeadm/app/phases/etcd/local_test.go +++ b/cmd/kubeadm/app/phases/etcd/local_test.go @@ -67,7 +67,7 @@ func TestCreateLocalEtcdStaticPodManifestFile(t *testing.T) { KubernetesVersion: "v1.7.0", Etcd: kubeadmapi.Etcd{ Local: &kubeadmapi.LocalEtcd{ - DataDir: "/var/lib/etcd", + DataDir: tmpdir + "/etcd", }, }, }, @@ -114,28 +114,17 @@ func TestCreateLocalEtcdStaticPodManifestFile(t *testing.T) { func TestGetEtcdCommand(t *testing.T) { var tests = []struct { - name string - cfg *kubeadmapi.InitConfiguration - initialCluster []etcdutil.Member - expected []string + name string + advertiseAddress string + nodeName string + extraArgs map[string]string + initialCluster []etcdutil.Member + expected []string }{ { - name: "Default args - with empty etcd initial cluster", - cfg: &kubeadmapi.InitConfiguration{ - LocalAPIEndpoint: kubeadmapi.APIEndpoint{ - AdvertiseAddress: "1.2.3.4", - }, - NodeRegistration: kubeadmapi.NodeRegistrationOptions{ - Name: "foo", - }, - ClusterConfiguration: kubeadmapi.ClusterConfiguration{ - Etcd: kubeadmapi.Etcd{ - Local: &kubeadmapi.LocalEtcd{ - DataDir: "/var/lib/etcd", - }, - }, - }, - }, + name: "Default args - with empty etcd initial 
cluster", + advertiseAddress: "1.2.3.4", + nodeName: "foo", expected: []string{ "etcd", "--name=foo", @@ -157,22 +146,9 @@ func TestGetEtcdCommand(t *testing.T) { }, }, { - name: "Default args - With an existing etcd cluster", - cfg: &kubeadmapi.InitConfiguration{ - LocalAPIEndpoint: kubeadmapi.APIEndpoint{ - AdvertiseAddress: "1.2.3.4", - }, - NodeRegistration: kubeadmapi.NodeRegistrationOptions{ - Name: "foo", - }, - ClusterConfiguration: kubeadmapi.ClusterConfiguration{ - Etcd: kubeadmapi.Etcd{ - Local: &kubeadmapi.LocalEtcd{ - DataDir: "/var/lib/etcd", - }, - }, - }, - }, + name: "Default args - With an existing etcd cluster", + advertiseAddress: "1.2.3.4", + nodeName: "foo", initialCluster: []etcdutil.Member{ {Name: "foo", PeerURL: fmt.Sprintf("https://1.2.3.4:%d", kubeadmconstants.EtcdListenPeerPort)}, // NB. the joining etcd instance should be part of the initialCluster list {Name: "bar", PeerURL: fmt.Sprintf("https://5.6.7.8:%d", kubeadmconstants.EtcdListenPeerPort)}, @@ -199,25 +175,12 @@ func TestGetEtcdCommand(t *testing.T) { }, }, { - name: "Extra args", - cfg: &kubeadmapi.InitConfiguration{ - LocalAPIEndpoint: kubeadmapi.APIEndpoint{ - AdvertiseAddress: "1.2.3.4", - }, - NodeRegistration: kubeadmapi.NodeRegistrationOptions{ - Name: "bar", - }, - ClusterConfiguration: kubeadmapi.ClusterConfiguration{ - Etcd: kubeadmapi.Etcd{ - Local: &kubeadmapi.LocalEtcd{ - DataDir: "/var/lib/etcd", - ExtraArgs: map[string]string{ - "listen-client-urls": "https://10.0.1.10:2379", - "advertise-client-urls": "https://10.0.1.10:2379", - }, - }, - }, - }, + name: "Extra args", + advertiseAddress: "1.2.3.4", + nodeName: "bar", + extraArgs: map[string]string{ + "listen-client-urls": "https://10.0.1.10:2379", + "advertise-client-urls": "https://10.0.1.10:2379", }, expected: []string{ "etcd", @@ -239,11 +202,51 @@ func TestGetEtcdCommand(t *testing.T) { fmt.Sprintf("--initial-cluster=bar=https://1.2.3.4:%d", kubeadmconstants.EtcdListenPeerPort), }, }, + { + name: "IPv6 
advertise address", + advertiseAddress: "2001:db8::3", + nodeName: "foo", + expected: []string{ + "etcd", + "--name=foo", + fmt.Sprintf("--listen-client-urls=https://127.0.0.1:%d,https://[2001:db8::3]:%d", kubeadmconstants.EtcdListenClientPort, kubeadmconstants.EtcdListenClientPort), + fmt.Sprintf("--advertise-client-urls=https://[2001:db8::3]:%d", kubeadmconstants.EtcdListenClientPort), + fmt.Sprintf("--listen-peer-urls=https://[2001:db8::3]:%d", kubeadmconstants.EtcdListenPeerPort), + fmt.Sprintf("--initial-advertise-peer-urls=https://[2001:db8::3]:%d", kubeadmconstants.EtcdListenPeerPort), + "--data-dir=/var/lib/etcd", + "--cert-file=" + kubeadmconstants.EtcdServerCertName, + "--key-file=" + kubeadmconstants.EtcdServerKeyName, + "--trusted-ca-file=" + kubeadmconstants.EtcdCACertName, + "--client-cert-auth=true", + "--peer-cert-file=" + kubeadmconstants.EtcdPeerCertName, + "--peer-key-file=" + kubeadmconstants.EtcdPeerKeyName, + "--peer-trusted-ca-file=" + kubeadmconstants.EtcdCACertName, + "--snapshot-count=10000", + "--peer-client-cert-auth=true", + fmt.Sprintf("--initial-cluster=foo=https://[2001:db8::3]:%d", kubeadmconstants.EtcdListenPeerPort), + }, + }, } for _, rt := range tests { t.Run(rt.name, func(t *testing.T) { - actual := getEtcdCommand(rt.cfg, rt.initialCluster) + cfg := &kubeadmapi.InitConfiguration{ + LocalAPIEndpoint: kubeadmapi.APIEndpoint{ + AdvertiseAddress: rt.advertiseAddress, + }, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{ + Name: rt.nodeName, + }, + ClusterConfiguration: kubeadmapi.ClusterConfiguration{ + Etcd: kubeadmapi.Etcd{ + Local: &kubeadmapi.LocalEtcd{ + DataDir: "/var/lib/etcd", + ExtraArgs: rt.extraArgs, + }, + }, + }, + } + actual := getEtcdCommand(cfg, rt.initialCluster) sort.Strings(actual) sort.Strings(rt.expected) if !reflect.DeepEqual(actual, rt.expected) { diff --git a/cmd/kubeadm/app/phases/kubeconfig/BUILD b/cmd/kubeadm/app/phases/kubeconfig/BUILD index c23e735b972..8df7d1038ec 100644 --- 
a/cmd/kubeadm/app/phases/kubeconfig/BUILD +++ b/cmd/kubeadm/app/phases/kubeconfig/BUILD @@ -48,9 +48,9 @@ go_test( "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util:go_default_library", + "//cmd/kubeadm/app/util/certs:go_default_library", "//cmd/kubeadm/app/util/pkiutil:go_default_library", "//cmd/kubeadm/test:go_default_library", - "//cmd/kubeadm/test/certs:go_default_library", "//cmd/kubeadm/test/kubeconfig:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go index 8416c672fc0..2d278d990f3 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go +++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go @@ -34,9 +34,9 @@ import ( kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" + certstestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" pkiutil "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" testutil "k8s.io/kubernetes/cmd/kubeadm/test" - certstestutil "k8s.io/kubernetes/cmd/kubeadm/test/certs" kubeconfigtestutil "k8s.io/kubernetes/cmd/kubeadm/test/kubeconfig" ) diff --git a/cmd/kubeadm/app/phases/kubelet/config_test.go b/cmd/kubeadm/app/phases/kubelet/config_test.go index 1949c472dff..3783423411e 100644 --- a/cmd/kubeadm/app/phases/kubelet/config_test.go +++ b/cmd/kubeadm/app/phases/kubelet/config_test.go @@ -35,7 +35,7 @@ func TestCreateConfigMap(t *testing.T) { cfg := &kubeadmapi.InitConfiguration{ NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: nodeName}, ClusterConfiguration: kubeadmapi.ClusterConfiguration{ - KubernetesVersion: "v1.12.0", + KubernetesVersion: "v1.13.0", ComponentConfigs: 
kubeadmapi.ComponentConfigs{ Kubelet: &kubeletconfig.KubeletConfiguration{}, }, diff --git a/cmd/kubeadm/app/phases/kubelet/flags_test.go b/cmd/kubeadm/app/phases/kubelet/flags_test.go index 26d4774c39e..1b06330504c 100644 --- a/cmd/kubeadm/app/phases/kubelet/flags_test.go +++ b/cmd/kubeadm/app/phases/kubelet/flags_test.go @@ -34,14 +34,19 @@ type fakeCmd struct { err error } -func (f fakeCmd) Run() error { return f.err } -func (f fakeCmd) CombinedOutput() ([]byte, error) { return f.b, f.err } -func (f fakeCmd) Output() ([]byte, error) { return f.b, f.err } -func (f fakeCmd) SetDir(dir string) {} -func (f fakeCmd) SetStdin(in io.Reader) {} -func (f fakeCmd) SetStdout(out io.Writer) {} -func (f fakeCmd) SetStderr(out io.Writer) {} -func (f fakeCmd) Stop() {} +func (f fakeCmd) Run() error { return f.err } +func (f fakeCmd) CombinedOutput() ([]byte, error) { return f.b, f.err } +func (f fakeCmd) Output() ([]byte, error) { return f.b, f.err } +func (f fakeCmd) SetDir(dir string) {} +func (f fakeCmd) SetStdin(in io.Reader) {} +func (f fakeCmd) SetStdout(out io.Writer) {} +func (f fakeCmd) SetStderr(out io.Writer) {} +func (f fakeCmd) SetEnv([]string) {} +func (f fakeCmd) Stop() {} +func (f fakeCmd) Start() error { return nil } +func (f fakeCmd) Wait() error { return nil } +func (f fakeCmd) StdoutPipe() (io.ReadCloser, error) { return nil, nil } +func (f fakeCmd) StderrPipe() (io.ReadCloser, error) { return nil, nil } type fakeExecer struct { ioMap map[string]fakeCmd diff --git a/cmd/kubeadm/app/phases/upgrade/BUILD b/cmd/kubeadm/app/phases/upgrade/BUILD index d804a9e2e70..ffaae1e84c2 100644 --- a/cmd/kubeadm/app/phases/upgrade/BUILD +++ b/cmd/kubeadm/app/phases/upgrade/BUILD @@ -82,11 +82,11 @@ go_test( "//cmd/kubeadm/app/phases/controlplane:go_default_library", "//cmd/kubeadm/app/phases/etcd:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", + "//cmd/kubeadm/app/util/certs:go_default_library", "//cmd/kubeadm/app/util/config:go_default_library", 
"//cmd/kubeadm/app/util/etcd:go_default_library", "//cmd/kubeadm/app/util/pkiutil:go_default_library", "//cmd/kubeadm/test:go_default_library", - "//cmd/kubeadm/test/certs:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/kubeadm/app/phases/upgrade/compute_test.go b/cmd/kubeadm/app/phases/upgrade/compute_test.go index fd99f637e4c..a84f0c60f4a 100644 --- a/cmd/kubeadm/app/phases/upgrade/compute_test.go +++ b/cmd/kubeadm/app/phases/upgrade/compute_test.go @@ -76,11 +76,9 @@ type fakeEtcdClient struct { mismatchedVersions bool } -func (f fakeEtcdClient) HasTLS() bool { return f.TLS } - func (f fakeEtcdClient) ClusterAvailable() (bool, error) { return true, nil } -func (f fakeEtcdClient) WaitForClusterAvailable(delay time.Duration, retries int, retryInterval time.Duration) (bool, error) { +func (f fakeEtcdClient) WaitForClusterAvailable(retries int, retryInterval time.Duration) (bool, error) { return true, nil } diff --git a/cmd/kubeadm/app/phases/upgrade/policy_test.go b/cmd/kubeadm/app/phases/upgrade/policy_test.go index 696788d7247..5808353c705 100644 --- a/cmd/kubeadm/app/phases/upgrade/policy_test.go +++ b/cmd/kubeadm/app/phases/upgrade/policy_test.go @@ -34,38 +34,38 @@ func TestEnforceVersionPolicies(t *testing.T) { { name: "minor upgrade", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.11.5", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.12.5", }, - newK8sVersion: "v1.11.5", + newK8sVersion: "v1.12.5", }, { name: "major upgrade", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.2", - kubeadmVersion: "v1.12.1", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.2", + kubeadmVersion: "v1.13.1", }, - newK8sVersion: "v1.12.0", + newK8sVersion: "v1.13.0", }, { name: 
"downgrade", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.11.3", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.12.3", }, - newK8sVersion: "v1.11.2", + newK8sVersion: "v1.12.2", }, { name: "same version upgrade", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.11.3", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.12.3", }, - newK8sVersion: "v1.11.3", + newK8sVersion: "v1.12.3", }, { name: "new version must be higher than v1.10.0", @@ -81,114 +81,114 @@ func TestEnforceVersionPolicies(t *testing.T) { { name: "upgrading two minor versions in one go is not supported", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.12.0", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.13.0", }, - newK8sVersion: "v1.12.0", + newK8sVersion: "v1.13.0", expectedMandatoryErrs: 1, // can't upgrade two minor versions expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large }, { name: "downgrading two minor versions in one go is not supported", vg: &fakeVersionGetter{ - clusterVersion: "v1.13.3", - kubeletVersion: "v1.13.3", - kubeadmVersion: "v1.13.0", + clusterVersion: "v1.14.3", + kubeletVersion: "v1.14.3", + kubeadmVersion: "v1.14.0", }, - newK8sVersion: "v1.11.3", + newK8sVersion: "v1.12.3", expectedMandatoryErrs: 1, // can't downgrade two minor versions expectedSkippableErrs: 1, // can't upgrade old k8s with newer kubeadm }, { name: "kubeadm version must be higher than the new kube version. 
However, patch version skews may be forced", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.11.3", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.12.3", }, - newK8sVersion: "v1.11.5", + newK8sVersion: "v1.12.5", expectedSkippableErrs: 1, }, { name: "kubeadm version must be higher than the new kube version. Trying to upgrade k8s to a higher minor version than kubeadm itself should never be supported", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.11.3", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.12.3", }, - newK8sVersion: "v1.12.0", + newK8sVersion: "v1.13.0", expectedMandatoryErrs: 1, }, { name: "the maximum skew between the cluster version and the kubelet versions should be one minor version. This may be forced through though.", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.10.8", - kubeadmVersion: "v1.12.0", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.11.8", + kubeadmVersion: "v1.13.0", }, - newK8sVersion: "v1.12.0", + newK8sVersion: "v1.13.0", expectedSkippableErrs: 1, }, { name: "experimental upgrades supported if the flag is set", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.12.0-beta.1", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.13.0-beta.1", }, - newK8sVersion: "v1.12.0-beta.1", + newK8sVersion: "v1.13.0-beta.1", allowExperimental: true, }, { name: "release candidate upgrades supported if the flag is set", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.12.0-rc.1", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.13.0-rc.1", }, - newK8sVersion: "v1.12.0-rc.1", + newK8sVersion: "v1.13.0-rc.1", allowRCs: true, }, { name: "release candidate upgrades supported if 
the flag is set", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.12.0-rc.1", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.13.0-rc.1", }, - newK8sVersion: "v1.12.0-rc.1", + newK8sVersion: "v1.13.0-rc.1", allowExperimental: true, }, { name: "the user should not be able to upgrade to an experimental version if they haven't opted into that", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.12.0-beta.1", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.13.0-beta.1", }, - newK8sVersion: "v1.12.0-beta.1", + newK8sVersion: "v1.13.0-beta.1", allowRCs: true, expectedSkippableErrs: 1, }, { name: "the user should not be able to upgrade to an release candidate version if they haven't opted into that", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.12.0-rc.1", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.13.0-rc.1", }, - newK8sVersion: "v1.12.0-rc.1", + newK8sVersion: "v1.13.0-rc.1", expectedSkippableErrs: 1, }, { name: "the user can't use a newer minor version of kubeadm to upgrade an older version of kubeadm", vg: &fakeVersionGetter{ - clusterVersion: "v1.11.3", - kubeletVersion: "v1.11.3", - kubeadmVersion: "v1.12.0", + clusterVersion: "v1.12.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.13.0", }, - newK8sVersion: "v1.11.6", + newK8sVersion: "v1.12.6", expectedSkippableErrs: 1, // can't upgrade old k8s with newer kubeadm }, } diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index a64c2163c1e..bcefea2f843 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -165,7 +165,7 @@ func (spm *KubeStaticPodPathManager) CleanupDirs() error { return nil } -func upgradeComponent(component string, 
waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.InitConfiguration, beforePodHash string, recoverManifests map[string]string, isTLSUpgrade bool) error { +func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.InitConfiguration, beforePodHash string, recoverManifests map[string]string) error { // Special treatment is required for etcd case, when rollbackOldManifests should roll back etcd // manifests only for the case when component is Etcd recoverEtcd := false @@ -173,21 +173,6 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP if component == constants.Etcd { recoverEtcd = true } - if isTLSUpgrade { - // We currently depend on getting the Etcd mirror Pod hash from the KubeAPIServer; - // Upgrading the Etcd protocol takes down the apiserver, so we can't verify component restarts if we restart Etcd independently. - // Skip waiting for Etcd to restart and immediately move on to updating the apiserver. - if component == constants.Etcd { - waitForComponentRestart = false - } - // Normally, if an Etcd upgrade is successful, but the apiserver upgrade fails, Etcd is not rolled back. - // In the case of a TLS upgrade, the old KubeAPIServer config is incompatible with the new Etcd confg, so we rollback Etcd - // if the APIServer upgrade fails. - if component == constants.KubeAPIServer { - recoverEtcd = true - fmt.Printf("[upgrade/staticpods] The %s manifest will be restored if component %q fails to upgrade\n", constants.Etcd, component) - } - } if err := renewCerts(cfg, component); err != nil { return errors.Wrapf(err, "failed to renew certificates for component %q", component) @@ -252,7 +237,7 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP } // performEtcdStaticPodUpgrade performs upgrade of etcd, it returns bool which indicates fatal error or not and the actual error. 
-func performEtcdStaticPodUpgrade(client clientset.Interface, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.InitConfiguration, recoverManifests map[string]string, isTLSUpgrade bool, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) (bool, error) { +func performEtcdStaticPodUpgrade(client clientset.Interface, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.InitConfiguration, recoverManifests map[string]string, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) (bool, error) { // Add etcd static pod spec only if external etcd is not configured if cfg.Etcd.External != nil { return false, errors.New("external etcd detected, won't try to change any etcd state") @@ -282,9 +267,8 @@ func performEtcdStaticPodUpgrade(client clientset.Interface, waiter apiclient.Wa if err != nil { return true, errors.Wrap(err, "failed to retrieve the current etcd version") } - currentEtcdVersionStr, ok := currentEtcdVersions[fmt.Sprintf("https://%s:%d", cfg.LocalAPIEndpoint.AdvertiseAddress, constants.EtcdListenClientPort)] + currentEtcdVersionStr, ok := currentEtcdVersions[etcdutil.GetClientURL(cfg)] if !ok { - fmt.Println(currentEtcdVersions) return true, errors.Wrap(err, "failed to retrieve the current etcd version") } @@ -313,26 +297,16 @@ func performEtcdStaticPodUpgrade(client clientset.Interface, waiter apiclient.Wa return true, errors.Wrap(err, "error creating local etcd static pod manifest file") } - // Waiter configurations for checking etcd status - noDelay := 0 * time.Second - podRestartDelay := noDelay - if isTLSUpgrade { - // If we are upgrading TLS we need to wait for old static pod to be removed. - // This is needed because we are not able to currently verify that the static pod - // has been updated through the apiserver across an etcd TLS upgrade. - // This value is arbitrary but seems to be long enough in manual testing. 
- podRestartDelay = 30 * time.Second - } retries := 10 retryInterval := 15 * time.Second // Perform etcd upgrade using common to all control plane components function - if err := upgradeComponent(constants.Etcd, waiter, pathMgr, cfg, beforeEtcdPodHash, recoverManifests, isTLSUpgrade); err != nil { + if err := upgradeComponent(constants.Etcd, waiter, pathMgr, cfg, beforeEtcdPodHash, recoverManifests); err != nil { fmt.Printf("[upgrade/etcd] Failed to upgrade etcd: %v\n", err) // Since upgrade component failed, the old etcd manifest has either been restored or was never touched // Now we need to check the health of etcd cluster if it is up with old manifest fmt.Println("[upgrade/etcd] Waiting for previous etcd to become available") - if _, err := oldEtcdClient.WaitForClusterAvailable(noDelay, retries, retryInterval); err != nil { + if _, err := oldEtcdClient.WaitForClusterAvailable(retries, retryInterval); err != nil { fmt.Printf("[upgrade/etcd] Failed to healthcheck previous etcd: %v\n", err) // At this point we know that etcd cluster is dead and it is safe to copy backup datastore and to rollback old etcd manifest @@ -345,7 +319,7 @@ func performEtcdStaticPodUpgrade(client clientset.Interface, waiter apiclient.Wa // Now that we've rolled back the data, let's check if the cluster comes up fmt.Println("[upgrade/etcd] Waiting for previous etcd to become available") - if _, err := oldEtcdClient.WaitForClusterAvailable(noDelay, retries, retryInterval); err != nil { + if _, err := oldEtcdClient.WaitForClusterAvailable(retries, retryInterval); err != nil { fmt.Printf("[upgrade/etcd] Failed to healthcheck previous etcd: %v\n", err) // Nothing else left to try to recover etcd cluster return true, errors.Wrapf(err, "fatal error rolling back local etcd cluster manifest, the backup of etcd database is stored here:(%s)", backupEtcdDir) @@ -370,7 +344,7 @@ func performEtcdStaticPodUpgrade(client clientset.Interface, waiter apiclient.Wa // Checking health state of etcd after the 
upgrade fmt.Println("[upgrade/etcd] Waiting for etcd to become available") - if _, err = newEtcdClient.WaitForClusterAvailable(podRestartDelay, retries, retryInterval); err != nil { + if _, err = newEtcdClient.WaitForClusterAvailable(retries, retryInterval); err != nil { fmt.Printf("[upgrade/etcd] Failed to healthcheck etcd: %v\n", err) // Despite the fact that upgradeComponent was successful, there is something wrong with the etcd cluster // First step is to restore back up of datastore @@ -388,7 +362,7 @@ func performEtcdStaticPodUpgrade(client clientset.Interface, waiter apiclient.Wa // Assuming rollback of the old etcd manifest was successful, check the status of etcd cluster again fmt.Println("[upgrade/etcd] Waiting for previous etcd to become available") - if _, err := oldEtcdClient.WaitForClusterAvailable(noDelay, retries, retryInterval); err != nil { + if _, err := oldEtcdClient.WaitForClusterAvailable(retries, retryInterval); err != nil { fmt.Printf("[upgrade/etcd] Failed to healthcheck previous etcd: %v\n", err) // Nothing else left to try to recover etcd cluster return true, errors.Wrapf(err, "fatal error rolling back local etcd cluster manifest, the backup of etcd database is stored here:(%s)", backupEtcdDir) @@ -405,7 +379,6 @@ func performEtcdStaticPodUpgrade(client clientset.Interface, waiter apiclient.Wa // StaticPodControlPlane upgrades a static pod-hosted control plane func StaticPodControlPlane(client clientset.Interface, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.InitConfiguration, etcdUpgrade bool, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) error { recoverManifests := map[string]string{} - var isTLSUpgrade bool var isExternalEtcd bool beforePodHashMap, err := waiter.WaitForStaticPodControlPlaneHashes(cfg.NodeRegistration.Name) @@ -443,16 +416,11 @@ func StaticPodControlPlane(client clientset.Interface, waiter apiclient.Waiter, // etcd upgrade is done prior to other control plane components if 
!isExternalEtcd && etcdUpgrade { - previousEtcdHasTLS := oldEtcdClient.HasTLS() - // set the TLS upgrade flag for all components - isTLSUpgrade = !previousEtcdHasTLS - if isTLSUpgrade { - fmt.Printf("[upgrade/etcd] Upgrading to TLS for %s\n", constants.Etcd) - } + fmt.Printf("[upgrade/etcd] Upgrading to TLS for %s\n", constants.Etcd) // Perform etcd upgrade using common to all control plane components function - fatal, err := performEtcdStaticPodUpgrade(client, waiter, pathMgr, cfg, recoverManifests, isTLSUpgrade, oldEtcdClient, newEtcdClient) + fatal, err := performEtcdStaticPodUpgrade(client, waiter, pathMgr, cfg, recoverManifests, oldEtcdClient, newEtcdClient) if err != nil { if fatal { return err @@ -469,7 +437,7 @@ func StaticPodControlPlane(client clientset.Interface, waiter apiclient.Waiter, } for _, component := range constants.MasterComponents { - if err = upgradeComponent(component, waiter, pathMgr, cfg, beforePodHashMap[component], recoverManifests, isTLSUpgrade); err != nil { + if err = upgradeComponent(component, waiter, pathMgr, cfg, beforePodHashMap[component], recoverManifests); err != nil { return err } } diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index 74c5623a8d2..e246697bcc1 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -38,11 +38,11 @@ import ( controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane" etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" + certstestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd" "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" testutil "k8s.io/kubernetes/cmd/kubeadm/test" - certstestutil "k8s.io/kubernetes/cmd/kubeadm/test/certs" ) const ( @@ -228,13 +228,9 @@ func (spm 
*fakeStaticPodPathManager) CleanupDirs() error { type fakeTLSEtcdClient struct{ TLS bool } -func (c fakeTLSEtcdClient) HasTLS() bool { - return c.TLS -} - func (c fakeTLSEtcdClient) ClusterAvailable() (bool, error) { return true, nil } -func (c fakeTLSEtcdClient) WaitForClusterAvailable(delay time.Duration, retries int, retryInterval time.Duration) (bool, error) { +func (c fakeTLSEtcdClient) WaitForClusterAvailable(retries int, retryInterval time.Duration) (bool, error) { return true, nil } @@ -263,14 +259,9 @@ func (c fakeTLSEtcdClient) AddMember(name string, peerAddrs string) ([]etcdutil. type fakePodManifestEtcdClient struct{ ManifestDir, CertificatesDir string } -func (c fakePodManifestEtcdClient) HasTLS() bool { - hasTLS, _ := etcdutil.PodManifestsHaveTLS(c.ManifestDir) - return hasTLS -} - func (c fakePodManifestEtcdClient) ClusterAvailable() (bool, error) { return true, nil } -func (c fakePodManifestEtcdClient) WaitForClusterAvailable(delay time.Duration, retries int, retryInterval time.Duration) (bool, error) { +func (c fakePodManifestEtcdClient) WaitForClusterAvailable(retries int, retryInterval time.Duration) (bool, error) { return true, nil } @@ -471,7 +462,7 @@ func TestStaticPodControlPlane(t *testing.T) { t.Fatalf("couldn't read temp file: %v", err) } - newcfg, err := getConfig("v1.11.0", tempCertsDir, tmpEtcdDataDir) + newcfg, err := getConfig("v1.13.0", tempCertsDir, tmpEtcdDataDir) if err != nil { t.Fatalf("couldn't create config: %v", err) } @@ -523,10 +514,11 @@ func TestStaticPodControlPlane(t *testing.T) { if (oldHash != newHash) != rt.manifestShouldChange { t.Errorf( - "failed StaticPodControlPlane\n%s\n\texpected manifest change: %t\n\tgot: %t", + "failed StaticPodControlPlane\n%s\n\texpected manifest change: %t\n\tgot: %t\n\tnewHash: %v", rt.description, rt.manifestShouldChange, (oldHash != newHash), + newHash, ) } return diff --git a/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go 
b/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go index 1b69f49569e..c10a57376af 100644 --- a/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go +++ b/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go @@ -69,7 +69,7 @@ func TestUploadConfiguration(t *testing.T) { AdvertiseAddress: "1.2.3.4", }, ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{ - KubernetesVersion: "v1.11.10", + KubernetesVersion: "v1.12.10", }, BootstrapTokens: []kubeadmapiv1beta1.BootstrapToken{ { diff --git a/cmd/kubeadm/app/preflight/checks_test.go b/cmd/kubeadm/app/preflight/checks_test.go index 1b7c01295ca..8d1b3d881f3 100644 --- a/cmd/kubeadm/app/preflight/checks_test.go +++ b/cmd/kubeadm/app/preflight/checks_test.go @@ -647,13 +647,13 @@ func TestKubeletVersionCheck(t *testing.T) { expectErrors bool expectWarnings bool }{ - {"v1.12.2", "", false, false}, // check minimally supported version when there is no information about control plane - {"v1.9.3", "v1.9.8", true, false}, // too old kubelet (older than kubeadmconstants.MinimumKubeletVersion), should fail. - {"v1.11.0", "v1.11.5", false, false}, // kubelet within same major.minor as control plane - {"v1.11.5", "v1.11.1", false, false}, // kubelet is newer, but still within same major.minor as control plane - {"v1.11.0", "v1.12.1", false, false}, // kubelet is lower than control plane, but newer than minimally supported - {"v1.12.0-alpha.1", "v1.11.1", true, false}, // kubelet is newer (development build) than control plane, should fail. - {"v1.12.0", "v1.11.5", true, false}, // kubelet is newer (release) than control plane, should fail. + {"v1.13.2", "", false, false}, // check minimally supported version when there is no information about control plane + {"v1.11.3", "v1.11.8", true, false}, // too old kubelet (older than kubeadmconstants.MinimumKubeletVersion), should fail. 
+ {"v1.12.0", "v1.12.5", false, false}, // kubelet within same major.minor as control plane + {"v1.12.5", "v1.12.1", false, false}, // kubelet is newer, but still within same major.minor as control plane + {"v1.12.0", "v1.13.1", false, false}, // kubelet is lower than control plane, but newer than minimally supported + {"v1.13.0-alpha.1", "v1.12.1", true, false}, // kubelet is newer (development build) than control plane, should fail. + {"v1.13.0", "v1.12.5", true, false}, // kubelet is newer (release) than control plane, should fail. } for _, tc := range cases { diff --git a/cmd/kubeadm/app/util/BUILD b/cmd/kubeadm/app/util/BUILD index becafa093b2..7356f887c09 100644 --- a/cmd/kubeadm/app/util/BUILD +++ b/cmd/kubeadm/app/util/BUILD @@ -80,6 +80,7 @@ filegroup( ":package-srcs", "//cmd/kubeadm/app/util/apiclient:all-srcs", "//cmd/kubeadm/app/util/audit:all-srcs", + "//cmd/kubeadm/app/util/certs:all-srcs", "//cmd/kubeadm/app/util/config:all-srcs", "//cmd/kubeadm/app/util/dryrun:all-srcs", "//cmd/kubeadm/app/util/etcd:all-srcs", diff --git a/cmd/kubeadm/app/util/arguments.go b/cmd/kubeadm/app/util/arguments.go index 619aba2eb4c..afba856c421 100644 --- a/cmd/kubeadm/app/util/arguments.go +++ b/cmd/kubeadm/app/util/arguments.go @@ -30,26 +30,26 @@ import ( func BuildArgumentListFromMap(baseArguments map[string]string, overrideArguments map[string]string) []string { var command []string var keys []string - for k := range overrideArguments { + + argsMap := make(map[string]string) + + for k, v := range baseArguments { + argsMap[k] = v + } + + for k, v := range overrideArguments { + argsMap[k] = v + } + + for k := range argsMap { keys = append(keys, k) } + sort.Strings(keys) for _, k := range keys { - v := overrideArguments[k] - // values of "" are allowed as well - command = append(command, fmt.Sprintf("--%s=%s", k, v)) - } - keys = []string{} - for k := range baseArguments { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := 
baseArguments[k] - if _, overrideExists := overrideArguments[k]; !overrideExists { - command = append(command, fmt.Sprintf("--%s=%s", k, v)) - } + command = append(command, fmt.Sprintf("--%s=%s", k, argsMap[k])) } + return command } diff --git a/cmd/kubeadm/app/util/arguments_test.go b/cmd/kubeadm/app/util/arguments_test.go index bdfd5e414f4..fd8c724983d 100644 --- a/cmd/kubeadm/app/util/arguments_test.go +++ b/cmd/kubeadm/app/util/arguments_test.go @@ -85,9 +85,9 @@ func TestBuildArgumentListFromMap(t *testing.T) { }, expected: []string{ "--admission-control=NamespaceLifecycle,LimitRanger", - "--something-that-allows-empty-string=", "--allow-privileged=true", "--insecure-bind-address=127.0.0.1", + "--something-that-allows-empty-string=", }, }, } diff --git a/cmd/kubeadm/test/certs/BUILD b/cmd/kubeadm/app/util/certs/BUILD similarity index 89% rename from cmd/kubeadm/test/certs/BUILD rename to cmd/kubeadm/app/util/certs/BUILD index 2bd48fc0e10..d3007e29a79 100644 --- a/cmd/kubeadm/test/certs/BUILD +++ b/cmd/kubeadm/app/util/certs/BUILD @@ -8,7 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], - importpath = "k8s.io/kubernetes/cmd/kubeadm/test/certs", + importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util/certs", deps = [ "//cmd/kubeadm/app/util/pkiutil:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", diff --git a/cmd/kubeadm/test/certs/util.go b/cmd/kubeadm/app/util/certs/util.go similarity index 56% rename from cmd/kubeadm/test/certs/util.go rename to cmd/kubeadm/app/util/certs/util.go index f22a3056a22..3a867d85d92 100644 --- a/cmd/kubeadm/test/certs/util.go +++ b/cmd/kubeadm/app/util/certs/util.go @@ -20,6 +20,7 @@ import ( "crypto/rsa" "crypto/x509" "net" + "path" "testing" certutil "k8s.io/client-go/util/cert" @@ -133,3 +134,116 @@ func AssertCertificateHasIPAddresses(t *testing.T, cert *x509.Certificate, IPAdd } } } + +// CreateCACert creates a generic CA cert. 
+func CreateCACert(t *testing.T) (*x509.Certificate, *rsa.PrivateKey) { + certCfg := &certutil.Config{CommonName: "kubernetes"} + cert, key, err := pkiutil.NewCertificateAuthority(certCfg) + if err != nil { + t.Fatalf("couldn't create CA: %v", err) + } + return cert, key +} + +// CreateTestCert makes a generic certficate with the given CA. +func CreateTestCert(t *testing.T, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey) { + cert, key, err := pkiutil.NewCertAndKey(caCert, caKey, + &certutil.Config{ + CommonName: "testCert", + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + }) + if err != nil { + t.Fatalf("couldn't create test cert: %v", err) + } + return cert, key +} + +// CertTestCase is a configuration of certificates and whether it's expected to work. +type CertTestCase struct { + Name string + Files PKIFiles + ExpectError bool +} + +// GetSparseCertTestCases produces a series of cert configurations and their intended outcomes. +func GetSparseCertTestCases(t *testing.T) []CertTestCase { + + caCert, caKey := CreateCACert(t) + fpCACert, fpCAKey := CreateCACert(t) + etcdCACert, etcdCAKey := CreateCACert(t) + + fpCert, fpKey := CreateTestCert(t, fpCACert, fpCAKey) + + return []CertTestCase{ + { + Name: "nothing present", + }, + { + Name: "CAs already exist", + Files: PKIFiles{ + "ca.crt": caCert, + "ca.key": caKey, + "front-proxy-ca.crt": fpCACert, + "front-proxy-ca.key": fpCAKey, + "etcd/ca.crt": etcdCACert, + "etcd/ca.key": etcdCAKey, + }, + }, + { + Name: "CA certs only", + Files: PKIFiles{ + "ca.crt": caCert, + "front-proxy-ca.crt": fpCACert, + "etcd/ca.crt": etcdCACert, + }, + ExpectError: true, + }, + { + Name: "FrontProxyCA with certs", + Files: PKIFiles{ + "ca.crt": caCert, + "ca.key": caKey, + "front-proxy-ca.crt": fpCACert, + "front-proxy-client.crt": fpCert, + "front-proxy-client.key": fpKey, + "etcd/ca.crt": etcdCACert, + "etcd/ca.key": etcdCAKey, + }, + }, + { + Name: "FrontProxy certs missing CA", + Files: 
PKIFiles{ + "front-proxy-client.crt": fpCert, + "front-proxy-client.key": fpKey, + }, + ExpectError: true, + }, + } +} + +// PKIFiles are a list of files that should be created for a test case +type PKIFiles map[string]interface{} + +// WritePKIFiles writes the given files out to the given directory +func WritePKIFiles(t *testing.T, dir string, files PKIFiles) { + for filename, body := range files { + switch body := body.(type) { + case *x509.Certificate: + if err := certutil.WriteCert(path.Join(dir, filename), certutil.EncodeCertPEM(body)); err != nil { + t.Errorf("unable to write certificate to file %q: [%v]", dir, err) + } + case *rsa.PublicKey: + publicKeyBytes, err := certutil.EncodePublicKeyPEM(body) + if err != nil { + t.Errorf("unable to write public key to file %q: [%v]", filename, err) + } + if err := certutil.WriteKey(path.Join(dir, filename), publicKeyBytes); err != nil { + t.Errorf("unable to write public key to file %q: [%v]", filename, err) + } + case *rsa.PrivateKey: + if err := certutil.WriteKey(path.Join(dir, filename), certutil.EncodePrivateKeyPEM(body)); err != nil { + t.Errorf("unable to write private key to file %q: [%v]", filename, err) + } + } + } +} diff --git a/cmd/kubeadm/app/util/config/BUILD b/cmd/kubeadm/app/util/config/BUILD index 012f544e1ac..738e33ab580 100644 --- a/cmd/kubeadm/app/util/config/BUILD +++ b/cmd/kubeadm/app/util/config/BUILD @@ -65,6 +65,7 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/github.com/pmezard/go-difflib/difflib:go_default_library", + "//vendor/github.com/renstrom/dedent:go_default_library", ], ) diff --git a/cmd/kubeadm/app/util/config/common.go b/cmd/kubeadm/app/util/config/common.go index 660210706ab..eb005b2a38e 100644 --- a/cmd/kubeadm/app/util/config/common.go +++ b/cmd/kubeadm/app/util/config/common.go @@ -17,6 +17,7 @@ limitations under the License. 
package config import ( + "bytes" "io/ioutil" "net" "reflect" @@ -35,28 +36,6 @@ import ( kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" ) -// AnyConfigFileAndDefaultsToInternal reads either a InitConfiguration or JoinConfiguration and unmarshals it -func AnyConfigFileAndDefaultsToInternal(cfgPath string) (runtime.Object, error) { - b, err := ioutil.ReadFile(cfgPath) - if err != nil { - return nil, err - } - - gvks, err := kubeadmutil.GroupVersionKindsFromBytes(b) - if err != nil { - return nil, err - } - - // First, check if the gvk list has InitConfiguration and in that case try to unmarshal it - if kubeadmutil.GroupVersionKindsHasInitConfiguration(gvks...) { - return ConfigFileAndDefaultsToInternalConfig(cfgPath, &kubeadmapiv1beta1.InitConfiguration{}) - } - if kubeadmutil.GroupVersionKindsHasJoinConfiguration(gvks...) { - return JoinConfigFileAndDefaultsToInternalConfig(cfgPath, &kubeadmapiv1beta1.JoinConfiguration{}) - } - return nil, errors.Errorf("didn't recognize types with GroupVersionKind: %v", gvks) -} - // MarshalKubeadmConfigObject marshals an Object registered in the kubeadm scheme. If the object is a InitConfiguration or ClusterConfiguration, some extra logic is run func MarshalKubeadmConfigObject(obj runtime.Object) ([]byte, error) { switch internalcfg := obj.(type) { @@ -181,3 +160,46 @@ func ChooseAPIServerBindAddress(bindAddress net.IP) (net.IP, error) { } return ip, nil } + +// MigrateOldConfigFromFile migrates an old configuration from a file into a new one (returned as a byte slice). Only kubeadm kinds are migrated. Others are silently ignored. 
+func MigrateOldConfigFromFile(cfgPath string) ([]byte, error) { + newConfig := [][]byte{} + + cfgBytes, err := ioutil.ReadFile(cfgPath) + if err != nil { + return []byte{}, err + } + + gvks, err := kubeadmutil.GroupVersionKindsFromBytes(cfgBytes) + if err != nil { + return []byte{}, err + } + + // Migrate InitConfiguration and ClusterConfiguration if there are any in the config + if kubeadmutil.GroupVersionKindsHasInitConfiguration(gvks...) || kubeadmutil.GroupVersionKindsHasClusterConfiguration(gvks...) { + o, err := ConfigFileAndDefaultsToInternalConfig(cfgPath, &kubeadmapiv1beta1.InitConfiguration{}) + if err != nil { + return []byte{}, err + } + b, err := MarshalKubeadmConfigObject(o) + if err != nil { + return []byte{}, err + } + newConfig = append(newConfig, b) + } + + // Migrate JoinConfiguration if there is any + if kubeadmutil.GroupVersionKindsHasJoinConfiguration(gvks...) { + o, err := JoinConfigFileAndDefaultsToInternalConfig(cfgPath, &kubeadmapiv1beta1.JoinConfiguration{}) + if err != nil { + return []byte{}, err + } + b, err := MarshalKubeadmConfigObject(o) + if err != nil { + return []byte{}, err + } + newConfig = append(newConfig, b) + } + + return bytes.Join(newConfig, []byte(constants.YAMLDocumentSeparator)), nil +} diff --git a/cmd/kubeadm/app/util/config/common_test.go b/cmd/kubeadm/app/util/config/common_test.go index 2aa4c44de37..08e0158cc5b 100644 --- a/cmd/kubeadm/app/util/config/common_test.go +++ b/cmd/kubeadm/app/util/config/common_test.go @@ -18,10 +18,15 @@ package config import ( "bytes" + "io/ioutil" + "os" "testing" + "github.com/renstrom/dedent" + kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" "k8s.io/kubernetes/cmd/kubeadm/app/constants" + kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" ) var files = map[string][]byte{ @@ -298,3 +303,212 @@ func TestVerifyAPIServerBindAddress(t *testing.T) { }) } } + +func TestMigrateOldConfigFromFile(t *testing.T) { + tests := []struct { + desc string + oldCfg 
string + expectedKinds []string + expectErr bool + }{ + { + desc: "empty file produces empty result", + oldCfg: "", + expectErr: false, + }, + { + desc: "bad config produces error", + oldCfg: dedent.Dedent(` + apiVersion: kubeadm.k8s.io/v1alpha3 + `), + expectErr: true, + }, + { + desc: "InitConfiguration only gets migrated", + oldCfg: dedent.Dedent(` + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: InitConfiguration + `), + expectedKinds: []string{ + constants.InitConfigurationKind, + constants.ClusterConfigurationKind, + }, + expectErr: false, + }, + { + desc: "ClusterConfiguration only gets migrated", + oldCfg: dedent.Dedent(` + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: ClusterConfiguration + `), + expectedKinds: []string{ + constants.InitConfigurationKind, + constants.ClusterConfigurationKind, + }, + expectErr: false, + }, + { + desc: "JoinConfiguration only gets migrated", + oldCfg: dedent.Dedent(` + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: JoinConfiguration + token: abcdef.0123456789abcdef + discoveryTokenAPIServers: + - kube-apiserver:6443 + discoveryTokenUnsafeSkipCAVerification: true + `), + expectedKinds: []string{ + constants.JoinConfigurationKind, + }, + expectErr: false, + }, + { + desc: "Init + Cluster Configurations are migrated", + oldCfg: dedent.Dedent(` + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: InitConfiguration + --- + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: ClusterConfiguration + `), + expectedKinds: []string{ + constants.InitConfigurationKind, + constants.ClusterConfigurationKind, + }, + expectErr: false, + }, + { + desc: "Init + Join Configurations are migrated", + oldCfg: dedent.Dedent(` + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: InitConfiguration + --- + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: JoinConfiguration + token: abcdef.0123456789abcdef + discoveryTokenAPIServers: + - kube-apiserver:6443 + discoveryTokenUnsafeSkipCAVerification: true + `), + expectedKinds: []string{ + constants.InitConfigurationKind, + 
constants.ClusterConfigurationKind, + constants.JoinConfigurationKind, + }, + expectErr: false, + }, + { + desc: "Cluster + Join Configurations are migrated", + oldCfg: dedent.Dedent(` + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: ClusterConfiguration + --- + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: JoinConfiguration + token: abcdef.0123456789abcdef + discoveryTokenAPIServers: + - kube-apiserver:6443 + discoveryTokenUnsafeSkipCAVerification: true + `), + expectedKinds: []string{ + constants.InitConfigurationKind, + constants.ClusterConfigurationKind, + constants.JoinConfigurationKind, + }, + expectErr: false, + }, + { + desc: "Init + Cluster + Join Configurations are migrated", + oldCfg: dedent.Dedent(` + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: InitConfiguration + --- + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: ClusterConfiguration + --- + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: JoinConfiguration + token: abcdef.0123456789abcdef + discoveryTokenAPIServers: + - kube-apiserver:6443 + discoveryTokenUnsafeSkipCAVerification: true + `), + expectedKinds: []string{ + constants.InitConfigurationKind, + constants.ClusterConfigurationKind, + constants.JoinConfigurationKind, + }, + expectErr: false, + }, + { + desc: "component configs are not migrated", + oldCfg: dedent.Dedent(` + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: InitConfiguration + --- + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: ClusterConfiguration + --- + apiVersion: kubeadm.k8s.io/v1alpha3 + kind: JoinConfiguration + token: abcdef.0123456789abcdef + discoveryTokenAPIServers: + - kube-apiserver:6443 + discoveryTokenUnsafeSkipCAVerification: true + --- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + --- + apiVersion: kubelet.config.k8s.io/v1beta1 + kind: KubeletConfiguration + `), + expectedKinds: []string{ + constants.InitConfigurationKind, + constants.ClusterConfigurationKind, + constants.JoinConfigurationKind, + }, + expectErr: false, + }, + } + + for _, test := 
range tests { + t.Run(test.desc, func(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("could not create temporary test file: %v", err) + } + fileName := file.Name() + defer os.Remove(fileName) + + _, err = file.WriteString(test.oldCfg) + file.Close() + if err != nil { + t.Fatalf("could not write contents of old config: %v", err) + } + + b, err := MigrateOldConfigFromFile(fileName) + if test.expectErr { + if err == nil { + t.Fatalf("unexpected success:\n%s", b) + } + } else { + if err != nil { + t.Fatalf("unexpected failure: %v", err) + } + gvks, err := kubeadmutil.GroupVersionKindsFromBytes(b) + if err != nil { + t.Fatalf("unexpected error returned by GroupVersionKindsFromBytes: %v", err) + } + if len(gvks) != len(test.expectedKinds) { + t.Fatalf("length mismatch between resulting gvks and expected kinds:\n\tlen(gvks)=%d\n\tlen(expectedKinds)=%d", + len(gvks), len(test.expectedKinds)) + } + for _, expectedKind := range test.expectedKinds { + if !kubeadmutil.GroupVersionKindsHasKind(gvks, expectedKind) { + t.Fatalf("migration failed to produce config kind: %s", expectedKind) + } + } + } + }) + } +} diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml index b6c3c66a889..8e8f095f815 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml @@ -183,7 +183,7 @@ Etcd: ServerCertSANs: null FeatureGates: null ImageRepository: k8s.gcr.io -KubernetesVersion: v1.11.2 +KubernetesVersion: v1.12.2 LocalAPIEndpoint: AdvertiseAddress: 192.168.2.2 BindPort: 6443 diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml index 339e4f07957..28ed6d7ce93 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml +++ 
b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml @@ -42,12 +42,12 @@ etcd: image: "" imageRepository: k8s.gcr.io kind: ClusterConfiguration -kubernetesVersion: v1.11.2 +kubernetesVersion: v1.12.2 networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 -unifiedControlPlaneImage: "k8s.gcr.io/hyperkube:v1.11.2" +unifiedControlPlaneImage: "k8s.gcr.io/hyperkube:v1.12.2" --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 bindAddress: 0.0.0.0 diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1beta1.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1beta1.yaml index bc8abfa27ba..7e04fad462c 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1beta1.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1beta1.yaml @@ -42,7 +42,7 @@ etcd: dataDir: /var/lib/etcd imageRepository: k8s.gcr.io kind: ClusterConfiguration -kubernetesVersion: v1.11.2 +kubernetesVersion: v1.12.2 networking: dnsDomain: cluster.local podSubnet: "" diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml index 4456eb175ce..0eac60cec3e 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml @@ -32,7 +32,7 @@ etcd: dataDir: /var/lib/etcd imageRepository: my-company.com kind: ClusterConfiguration -kubernetesVersion: v1.12.0 +kubernetesVersion: v1.13.0 networking: dnsDomain: cluster.global podSubnet: 10.148.0.0/16 diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/incomplete.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/incomplete.yaml index 362e8e31357..b7ce756d2a2 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/incomplete.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/master/incomplete.yaml @@ -12,7 +12,7 @@ apiVersion: 
kubeadm.k8s.io/v1beta1 certificatesDir: /var/lib/kubernetes/pki imageRepository: my-company.com kind: ClusterConfiguration -kubernetesVersion: v1.12.0 +kubernetesVersion: v1.13.0 networking: dnsDomain: cluster.global podSubnet: 10.148.0.0/16 diff --git a/cmd/kubeadm/app/util/endpoint.go b/cmd/kubeadm/app/util/endpoint.go index 8760aec6b0b..62746bf17db 100644 --- a/cmd/kubeadm/app/util/endpoint.go +++ b/cmd/kubeadm/app/util/endpoint.go @@ -95,7 +95,7 @@ func ParseHostPort(hostport string) (string, string, error) { // if port is defined, parse and validate it if port != "" { if _, err := ParsePort(port); err != nil { - return "", "", errors.New("port must be a valid number between 1 and 65535, inclusive") + return "", "", errors.Errorf("hostport %s: port %s must be a valid number between 1 and 65535, inclusive", hostport, port) } } @@ -109,7 +109,7 @@ func ParseHostPort(hostport string) (string, string, error) { return host, port, nil } - return "", "", errors.New("host must be a valid IP address or a valid RFC-1123 DNS subdomain") + return "", "", errors.Errorf("hostport %s: host '%s' must be a valid IP address or a valid RFC-1123 DNS subdomain", hostport, host) } // ParsePort parses a string representing a TCP port. 
diff --git a/cmd/kubeadm/app/util/etcd/BUILD b/cmd/kubeadm/app/util/etcd/BUILD index bb666f7cabb..adf2ff4f1b3 100644 --- a/cmd/kubeadm/app/util/etcd/BUILD +++ b/cmd/kubeadm/app/util/etcd/BUILD @@ -24,6 +24,7 @@ go_test( embed = [":go_default_library"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/test:go_default_library", ], ) diff --git a/cmd/kubeadm/app/util/etcd/etcd.go b/cmd/kubeadm/app/util/etcd/etcd.go index c41b0ccdadd..c1c745f024a 100644 --- a/cmd/kubeadm/app/util/etcd/etcd.go +++ b/cmd/kubeadm/app/util/etcd/etcd.go @@ -20,7 +20,9 @@ import ( "context" "crypto/tls" "fmt" + "net" "path/filepath" + "strconv" "strings" "time" @@ -41,8 +43,7 @@ type ClusterInterrogator interface { GetClusterStatus() (map[string]*clientv3.StatusResponse, error) GetClusterVersions() (map[string]string, error) GetVersion() (string, error) - HasTLS() bool - WaitForClusterAvailable(delay time.Duration, retries int, retryInterval time.Duration) (bool, error) + WaitForClusterAvailable(retries int, retryInterval time.Duration) (bool, error) Sync() error AddMember(name string, peerAddrs string) ([]Member, error) } @@ -53,11 +54,6 @@ type Client struct { TLS *tls.Config } -// HasTLS returns true if etcd is configured for TLS -func (c Client) HasTLS() bool { - return c.TLS != nil -} - // PodManifestsHaveTLS reads the etcd staticpod manifest from disk and returns false if the TLS flags // are missing from the command list. If all the flags are present it returns true. func PodManifestsHaveTLS(ManifestDir string) (bool, error) { @@ -124,38 +120,41 @@ func NewFromCluster(client clientset.Interface, certificatesDir string) (*Client // The first case should be dropped in v1.14 when support for v1.12 clusters can be removed from the codebase. 
// Detect which type of etcd we are dealing with + // Please note that this test can be executed only on master nodes during upgrades; + // For nodes where we are joining a new control plane node instead we should tolerate that the etcd manifest does not + // exists and try to connect to etcd using API server advertise address; as described above this will lead to a know isse + // for cluster created with v1.12, but a documented workaround will be provided oldManifest := false klog.V(1).Infoln("checking etcd manifest") etcdManifestFile := constants.GetStaticPodFilepath(constants.Etcd, constants.GetStaticPodDirectory()) etcdPod, err := staticpod.ReadStaticPodFromDisk(etcdManifestFile) - if err != nil { - return nil, errors.Wrap(err, "error reading etcd manifest file") - } - etcdContainer := etcdPod.Spec.Containers[0] - for _, arg := range etcdContainer.Command { - if arg == "--listen-client-urls=https://127.0.0.1:2379" { - klog.V(1).Infoln("etcd manifest created by kubeadm v1.12") - oldManifest = true - } - } - - // if etcd is listening on localhost only - if oldManifest == true { - // etcd cluster has a single member "by design" - endpoints := []string{fmt.Sprintf("localhost:%d", constants.EtcdListenClientPort)} - - etcdClient, err := New( - endpoints, - filepath.Join(certificatesDir, constants.EtcdCACertName), - filepath.Join(certificatesDir, constants.EtcdHealthcheckClientCertName), - filepath.Join(certificatesDir, constants.EtcdHealthcheckClientKeyName), - ) - if err != nil { - return nil, errors.Wrapf(err, "error creating etcd client for %v endpoint", endpoints) + if err == nil { + etcdContainer := etcdPod.Spec.Containers[0] + for _, arg := range etcdContainer.Command { + if arg == "--listen-client-urls=https://127.0.0.1:2379" { + klog.V(1).Infoln("etcd manifest created by kubeadm v1.12") + oldManifest = true + } } - return etcdClient, nil + // if etcd is listening on localhost only + if oldManifest == true { + // etcd cluster has a single member "by design" + 
endpoints := []string{fmt.Sprintf("localhost:%d", constants.EtcdListenClientPort)} + + etcdClient, err := New( + endpoints, + filepath.Join(certificatesDir, constants.EtcdCACertName), + filepath.Join(certificatesDir, constants.EtcdHealthcheckClientCertName), + filepath.Join(certificatesDir, constants.EtcdHealthcheckClientKeyName), + ) + if err != nil { + return nil, errors.Wrapf(err, "error creating etcd client for %v endpoint", endpoints) + } + + return etcdClient, nil + } } // etcd is listening on localhost and API server advertise address, and @@ -171,7 +170,7 @@ func NewFromCluster(client clientset.Interface, certificatesDir string) (*Client // Get the list of etcd endpoints from cluster status endpoints := []string{} for _, e := range clusterStatus.APIEndpoints { - endpoints = append(endpoints, fmt.Sprintf("https://%s:%d", e.AdvertiseAddress, constants.EtcdListenClientPort)) + endpoints = append(endpoints, GetClientURLByIP(e.AdvertiseAddress)) } klog.V(1).Infof("etcd endpoints read from pods: %s", strings.Join(endpoints, ",")) @@ -191,12 +190,13 @@ func NewFromCluster(client clientset.Interface, certificatesDir string) (*Client if err != nil { return nil, errors.Wrap(err, "error syncing endpoints with etc") } + klog.V(1).Infof("update etcd endpoints: %s", strings.Join(etcdClient.Endpoints, ",")) return etcdClient, nil } // Sync synchronizes client's endpoints with the known endpoints from the etcd membership. 
-func (c Client) Sync() error { +func (c *Client) Sync() error { cli, err := clientv3.New(clientv3.Config{ Endpoints: c.Endpoints, DialTimeout: 20 * time.Second, @@ -329,10 +329,8 @@ func (c Client) GetClusterStatus() (map[string]*clientv3.StatusResponse, error) return clusterStatus, nil } -// WaitForClusterAvailable returns true if all endpoints in the cluster are available after an initial delay and retry attempts, an error is returned otherwise -func (c Client) WaitForClusterAvailable(delay time.Duration, retries int, retryInterval time.Duration) (bool, error) { - fmt.Printf("[util/etcd] Waiting %v for initial delay\n", delay) - time.Sleep(delay) +// WaitForClusterAvailable returns true if all endpoints in the cluster are available after retry attempts, an error is returned otherwise +func (c Client) WaitForClusterAvailable(retries int, retryInterval time.Duration) (bool, error) { for i := 0; i < retries; i++ { if i > 0 { fmt.Printf("[util/etcd] Waiting %v until next retry\n", retryInterval) @@ -358,3 +356,21 @@ func (c Client) WaitForClusterAvailable(delay time.Duration, retries int, retryI func CheckConfigurationIsHA(cfg *kubeadmapi.Etcd) bool { return cfg.External != nil && len(cfg.External.Endpoints) > 1 } + +// GetClientURL creates an HTTPS URL that uses the configured advertise +// address and client port for the API controller +func GetClientURL(cfg *kubeadmapi.InitConfiguration) string { + return "https://" + net.JoinHostPort(cfg.LocalAPIEndpoint.AdvertiseAddress, strconv.Itoa(constants.EtcdListenClientPort)) +} + +// GetPeerURL creates an HTTPS URL that uses the configured advertise +// address and peer port for the API controller +func GetPeerURL(cfg *kubeadmapi.InitConfiguration) string { + return "https://" + net.JoinHostPort(cfg.LocalAPIEndpoint.AdvertiseAddress, strconv.Itoa(constants.EtcdListenPeerPort)) +} + +// GetClientURLByIP creates an HTTPS URL based on an IP address +// and the client listening port. 
+func GetClientURLByIP(ip string) string { + return "https://" + net.JoinHostPort(ip, strconv.Itoa(constants.EtcdListenClientPort)) +} diff --git a/cmd/kubeadm/app/util/etcd/etcd_test.go b/cmd/kubeadm/app/util/etcd/etcd_test.go index 58e133eca03..efd8f896a77 100644 --- a/cmd/kubeadm/app/util/etcd/etcd_test.go +++ b/cmd/kubeadm/app/util/etcd/etcd_test.go @@ -17,12 +17,15 @@ limitations under the License. package etcd import ( + "fmt" "io/ioutil" "os" "path/filepath" + "strconv" "testing" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" testutil "k8s.io/kubernetes/cmd/kubeadm/test" ) @@ -308,3 +311,90 @@ func TestCheckConfigurationIsHA(t *testing.T) { }) } } + +func testGetURL(t *testing.T, getURLFunc func(*kubeadmapi.InitConfiguration) string, port int) { + portStr := strconv.Itoa(port) + var tests = []struct { + name string + advertiseAddress string + expectedURL string + }{ + { + name: "IPv4", + advertiseAddress: "10.10.10.10", + expectedURL: fmt.Sprintf("https://10.10.10.10:%s", portStr), + }, + { + name: "IPv6", + advertiseAddress: "2001:db8::2", + expectedURL: fmt.Sprintf("https://[2001:db8::2]:%s", portStr), + }, + { + name: "IPv4 localhost", + advertiseAddress: "127.0.0.1", + expectedURL: fmt.Sprintf("https://127.0.0.1:%s", portStr), + }, + { + name: "IPv6 localhost", + advertiseAddress: "::1", + expectedURL: fmt.Sprintf("https://[::1]:%s", portStr), + }, + } + + for _, test := range tests { + cfg := &kubeadmapi.InitConfiguration{ + LocalAPIEndpoint: kubeadmapi.APIEndpoint{ + AdvertiseAddress: test.advertiseAddress, + }, + } + url := getURLFunc(cfg) + if url != test.expectedURL { + t.Errorf("expected %s, got %s", test.expectedURL, url) + } + } +} + +func TestGetClientURL(t *testing.T) { + testGetURL(t, GetClientURL, constants.EtcdListenClientPort) +} + +func TestGetPeerURL(t *testing.T) { + testGetURL(t, GetClientURL, constants.EtcdListenClientPort) +} + +func TestGetClientURLByIP(t *testing.T) { + 
portStr := strconv.Itoa(constants.EtcdListenClientPort) + var tests = []struct { + name string + ip string + expectedURL string + }{ + { + name: "IPv4", + ip: "10.10.10.10", + expectedURL: fmt.Sprintf("https://10.10.10.10:%s", portStr), + }, + { + name: "IPv6", + ip: "2001:db8::2", + expectedURL: fmt.Sprintf("https://[2001:db8::2]:%s", portStr), + }, + { + name: "IPv4 localhost", + ip: "127.0.0.1", + expectedURL: fmt.Sprintf("https://127.0.0.1:%s", portStr), + }, + { + name: "IPv6 localhost", + ip: "::1", + expectedURL: fmt.Sprintf("https://[::1]:%s", portStr), + }, + } + + for _, test := range tests { + url := GetClientURLByIP(test.ip) + if url != test.expectedURL { + t.Errorf("expected %s, got %s", test.expectedURL, url) + } + } +} diff --git a/cmd/kubeadm/test/BUILD b/cmd/kubeadm/test/BUILD index 2c6fc05940a..53a4ff8911d 100644 --- a/cmd/kubeadm/test/BUILD +++ b/cmd/kubeadm/test/BUILD @@ -13,9 +13,9 @@ go_library( "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", + "//cmd/kubeadm/app/util/certs:go_default_library", "//cmd/kubeadm/app/util/config:go_default_library", "//cmd/kubeadm/app/util/pkiutil:go_default_library", - "//cmd/kubeadm/test/certs:go_default_library", "//vendor/github.com/renstrom/dedent:go_default_library", ], ) @@ -31,7 +31,6 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//cmd/kubeadm/test/certs:all-srcs", "//cmd/kubeadm/test/cmd:all-srcs", "//cmd/kubeadm/test/kubeconfig:all-srcs", ], diff --git a/cmd/kubeadm/test/cmd/init_test.go b/cmd/kubeadm/test/cmd/init_test.go index e986047ec68..282ec83527b 100644 --- a/cmd/kubeadm/test/cmd/init_test.go +++ b/cmd/kubeadm/test/cmd/init_test.go @@ -102,7 +102,7 @@ func TestCmdInitKubernetesVersion(t *testing.T) { }, { name: "valid version is accepted", - args: "--kubernetes-version=1.11.0", + args: "--kubernetes-version=1.12.0", expected: true, }, } diff --git 
a/cmd/kubeadm/test/kubeconfig/BUILD b/cmd/kubeadm/test/kubeconfig/BUILD index 7b6e9eeba1d..792e593e136 100644 --- a/cmd/kubeadm/test/kubeconfig/BUILD +++ b/cmd/kubeadm/test/kubeconfig/BUILD @@ -10,7 +10,7 @@ go_library( srcs = ["util.go"], importpath = "k8s.io/kubernetes/cmd/kubeadm/test/kubeconfig", deps = [ - "//cmd/kubeadm/test/certs:go_default_library", + "//cmd/kubeadm/app/util/certs:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", ], ) diff --git a/cmd/kubeadm/test/kubeconfig/util.go b/cmd/kubeadm/test/kubeconfig/util.go index daad6892282..51092a79d7e 100644 --- a/cmd/kubeadm/test/kubeconfig/util.go +++ b/cmd/kubeadm/test/kubeconfig/util.go @@ -22,7 +22,7 @@ import ( "testing" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - certstestutil "k8s.io/kubernetes/cmd/kubeadm/test/certs" + certstestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" ) // AssertKubeConfigCurrentCluster is a utility function for kubeadm testing that asserts if the CurrentCluster in diff --git a/cmd/kubeadm/test/util.go b/cmd/kubeadm/test/util.go index eceea5712c0..e54dde58575 100644 --- a/cmd/kubeadm/test/util.go +++ b/cmd/kubeadm/test/util.go @@ -28,9 +28,9 @@ import ( kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" + certtestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" - certtestutil "k8s.io/kubernetes/cmd/kubeadm/test/certs" ) // SetupTempDir is a utility function for kubeadm testing, that creates a temporary directory @@ -66,7 +66,7 @@ func SetupInitConfigurationFile(t *testing.T, tmpdir string, cfg *kubeadmapi.Ini apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration certificatesDir: {{.CertificatesDir}} - kubernetesVersion: v1.11.0 + kubernetesVersion: v1.12.0 
`))) f, err := os.Create(cfgPath) diff --git a/cmd/kubelet/app/BUILD b/cmd/kubelet/app/BUILD index 4dc0bbcc4ee..59e5a936053 100644 --- a/cmd/kubelet/app/BUILD +++ b/cmd/kubelet/app/BUILD @@ -8,8 +8,22 @@ load( go_test( name = "go_default_test", - srcs = ["server_test.go"], + srcs = [ + "server_bootstrap_test.go", + "server_test.go", + ], embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/util/cert:go_default_library", + "//vendor/github.com/cloudflare/cfssl/config:go_default_library", + "//vendor/github.com/cloudflare/cfssl/signer:go_default_library", + "//vendor/github.com/cloudflare/cfssl/signer/local:go_default_library", + ], ) go_library( @@ -113,6 +127,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", @@ -129,7 +144,7 @@ go_library( "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/exp/inotify:go_default_library", + "//vendor/github.com/sigma/go-inotify:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ "//pkg/windows/service:go_default_library", diff --git a/cmd/kubelet/app/server.go 
b/cmd/kubelet/app/server.go index 04422bb8990..c84979fd617 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -50,6 +50,7 @@ import ( "k8s.io/apiserver/pkg/util/flag" "k8s.io/client-go/dynamic" clientset "k8s.io/client-go/kubernetes" + certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -169,7 +170,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API utilflag.PrintFlags(cleanFlagSet) // set feature gates from initial flags-based config - if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { + if err := utilfeature.DefaultMutableFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { klog.Fatal(err) } @@ -195,7 +196,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API klog.Fatal(err) } // update feature gates based on new config - if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { + if err := utilfeature.DefaultMutableFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { klog.Fatal(err) } } @@ -226,7 +227,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API kubeletConfig = dynamicKubeletConfig // Note: flag precedence was already enforced in the controller, prior to validation, // by our above transform function. Now we simply update feature gates from the new config. 
- if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { + if err := utilfeature.DefaultMutableFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { klog.Fatal(err) } } @@ -467,7 +468,7 @@ func makeEventRecorder(kubeDeps *kubelet.Dependencies, nodeName types.NodeName) func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan struct{}) (err error) { // Set global feature gates based on the value on the initial KubeletServer - err = utilfeature.DefaultFeatureGate.SetFromMap(s.KubeletConfiguration.FeatureGates) + err = utilfeature.DefaultMutableFeatureGate.SetFromMap(s.KubeletConfiguration.FeatureGates) if err != nil { return err } @@ -537,66 +538,39 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan return err } - if s.BootstrapKubeconfig != "" { - if err := bootstrap.LoadClientCert(s.KubeConfig, s.BootstrapKubeconfig, s.CertDirectory, nodeName); err != nil { - return err - } - } - // if in standalone mode, indicate as much by setting all clients to nil - if standaloneMode { + switch { + case standaloneMode: kubeDeps.KubeClient = nil kubeDeps.DynamicKubeClient = nil kubeDeps.EventClient = nil kubeDeps.HeartbeatClient = nil klog.Warningf("standalone mode, no API client") - } else if kubeDeps.KubeClient == nil || kubeDeps.EventClient == nil || kubeDeps.HeartbeatClient == nil || kubeDeps.DynamicKubeClient == nil { - // initialize clients if not standalone mode and any of the clients are not provided - var kubeClient clientset.Interface - var eventClient v1core.EventsGetter - var heartbeatClient clientset.Interface - var dynamicKubeClient dynamic.Interface - clientConfig, err := createAPIServerClientConfig(s) - if err != nil { - return fmt.Errorf("invalid kubeconfig: %v", err) - } - - var clientCertificateManager certificate.Manager - if s.RotateCertificates && utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletClientCertificate) { - 
clientCertificateManager, err = kubeletcertificate.NewKubeletClientCertificateManager(s.CertDirectory, nodeName, clientConfig.CertData, clientConfig.KeyData, clientConfig.CertFile, clientConfig.KeyFile) - if err != nil { - return err - } - } - // we set exitAfter to five minutes because we use this client configuration to request new certs - if we are unable - // to request new certs, we will be unable to continue normal operation. Exiting the process allows a wrapper - // or the bootstrapping credentials to potentially lay down new initial config. - closeAllConns, err := kubeletcertificate.UpdateTransport(wait.NeverStop, clientConfig, clientCertificateManager, 5*time.Minute) + case kubeDeps.KubeClient == nil, kubeDeps.EventClient == nil, kubeDeps.HeartbeatClient == nil, kubeDeps.DynamicKubeClient == nil: + clientConfig, closeAllConns, err := buildKubeletClientConfig(s, nodeName) if err != nil { return err } + kubeDeps.OnHeartbeatFailure = closeAllConns - kubeClient, err = clientset.NewForConfig(clientConfig) + kubeDeps.KubeClient, err = clientset.NewForConfig(clientConfig) if err != nil { - klog.Warningf("New kubeClient from clientConfig error: %v", err) - } else if kubeClient.CertificatesV1beta1() != nil && clientCertificateManager != nil { - klog.V(2).Info("Starting client certificate rotation.") - clientCertificateManager.SetCertificateSigningRequestClient(kubeClient.CertificatesV1beta1().CertificateSigningRequests()) - clientCertificateManager.Start() + return fmt.Errorf("failed to initialize kubelet client: %v", err) } - dynamicKubeClient, err = dynamic.NewForConfig(clientConfig) + + kubeDeps.DynamicKubeClient, err = dynamic.NewForConfig(clientConfig) if err != nil { - klog.Warningf("Failed to initialize dynamic KubeClient: %v", err) + return fmt.Errorf("failed to initialize kubelet dynamic client: %v", err) } // make a separate client for events eventClientConfig := *clientConfig eventClientConfig.QPS = float32(s.EventRecordQPS) eventClientConfig.Burst = 
int(s.EventBurst) - eventClient, err = v1core.NewForConfig(&eventClientConfig) + kubeDeps.EventClient, err = v1core.NewForConfig(&eventClientConfig) if err != nil { - klog.Warningf("Failed to create API Server client for Events: %v", err) + return fmt.Errorf("failed to initialize kubelet event client: %v", err) } // make a separate client for heartbeat with throttling disabled and a timeout attached @@ -610,28 +584,18 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan } } heartbeatClientConfig.QPS = float32(-1) - heartbeatClient, err = clientset.NewForConfig(&heartbeatClientConfig) + kubeDeps.HeartbeatClient, err = clientset.NewForConfig(&heartbeatClientConfig) if err != nil { - klog.Warningf("Failed to create API Server client for heartbeat: %v", err) + return fmt.Errorf("failed to initialize kubelet heartbeat client: %v", err) } - // csiClient works with CRDs that support json only - clientConfig.ContentType = "application/json" - csiClient, err := csiclientset.NewForConfig(clientConfig) + // CRDs are JSON only, and client renegotiation for streaming is not correct as per #67803 + csiClientConfig := restclient.CopyConfig(clientConfig) + csiClientConfig.ContentType = "application/json" + kubeDeps.CSIClient, err = csiclientset.NewForConfig(csiClientConfig) if err != nil { - klog.Warningf("Failed to create CSI API client: %v", err) + return fmt.Errorf("failed to initialize kubelet storage client: %v", err) } - - kubeDeps.KubeClient = kubeClient - kubeDeps.DynamicKubeClient = dynamicKubeClient - if heartbeatClient != nil { - kubeDeps.HeartbeatClient = heartbeatClient - kubeDeps.OnHeartbeatFailure = closeAllConns - } - if eventClient != nil { - kubeDeps.EventClient = eventClient - } - kubeDeps.CSIClient = csiClient } // If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops @@ -663,7 +627,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan 
if kubeDeps.ContainerManager == nil { if s.CgroupsPerQOS && s.CgroupRoot == "" { - klog.Infof("--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /") + klog.Info("--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /") s.CgroupRoot = "/" } kubeReserved, err := parseResourceList(s.KubeReserved) @@ -771,6 +735,118 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan return nil } +// buildKubeletClientConfig constructs the appropriate client config for the kubelet depending on whether +// bootstrapping is enabled or client certificate rotation is enabled. +func buildKubeletClientConfig(s *options.KubeletServer, nodeName types.NodeName) (*restclient.Config, func(), error) { + if s.RotateCertificates && utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletClientCertificate) { + // Rules for client rotation and the handling of kube config files: + // + // 1. If the client provides only a kubeconfig file, we must use that as the initial client + // kubeadm needs the initial data in the kubeconfig to be placed into the cert store + // 2. If the client provides only an initial bootstrap kubeconfig file, we must create a + // kubeconfig file at the target location that points to the cert store, but until + // the file is present the client config will have no certs + // 3. If the client provides both and the kubeconfig is valid, we must ignore the bootstrap + // kubeconfig. + // 4. If the client provides both and the kubeconfig is expired or otherwise invalid, we must + // replace the kubeconfig with a new file that points to the cert dir + // + // The desired configuration for bootstrapping is to use a bootstrap kubeconfig and to have + // the kubeconfig file be managed by this process. 
For backwards compatibility with kubeadm, + // which provides a high powered kubeconfig on the master with cert/key data, we must + // bootstrap the cert manager with the contents of the initial client config. + + klog.Infof("Client rotation is on, will bootstrap in background") + certConfig, clientConfig, err := bootstrap.LoadClientConfig(s.KubeConfig, s.BootstrapKubeconfig, s.CertDirectory) + if err != nil { + return nil, nil, err + } + + clientCertificateManager, err := buildClientCertificateManager(certConfig, clientConfig, s.CertDirectory, nodeName) + if err != nil { + return nil, nil, err + } + + // the rotating transport will use the cert from the cert manager instead of these files + transportConfig := restclient.AnonymousClientConfig(clientConfig) + kubeClientConfigOverrides(s, transportConfig) + + // we set exitAfter to five minutes because we use this client configuration to request new certs - if we are unable + // to request new certs, we will be unable to continue normal operation. Exiting the process allows a wrapper + // or the bootstrapping credentials to potentially lay down new initial config. 
+ closeAllConns, err := kubeletcertificate.UpdateTransport(wait.NeverStop, transportConfig, clientCertificateManager, 5*time.Minute) + if err != nil { + return nil, nil, err + } + + klog.V(2).Info("Starting client certificate rotation.") + clientCertificateManager.Start() + + return transportConfig, closeAllConns, nil + } + + if len(s.BootstrapKubeconfig) > 0 { + if err := bootstrap.LoadClientCert(s.KubeConfig, s.BootstrapKubeconfig, s.CertDirectory, nodeName); err != nil { + return nil, nil, err + } + } + + clientConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.KubeConfig}, + &clientcmd.ConfigOverrides{}, + ).ClientConfig() + if err != nil { + return nil, nil, fmt.Errorf("invalid kubeconfig: %v", err) + } + + kubeClientConfigOverrides(s, clientConfig) + + return clientConfig, nil, nil +} + +// buildClientCertificateManager creates a certificate manager that will use certConfig to request a client certificate +// if no certificate is available, or the most recent clientConfig (which is assumed to point to the cert that the manager will +// write out). +func buildClientCertificateManager(certConfig, clientConfig *restclient.Config, certDir string, nodeName types.NodeName) (certificate.Manager, error) { + newClientFn := func(current *tls.Certificate) (certificatesclient.CertificateSigningRequestInterface, error) { + // If we have a valid certificate, use that to fetch CSRs. Otherwise use the bootstrap + // credentials. In the future it would be desirable to change the behavior of bootstrap + // to always fall back to the external bootstrap credentials when such credentials are + // provided by a fundamental trust system like cloud VM identity or an HSM module. 
+ config := certConfig + if current != nil { + config = clientConfig + } + client, err := clientset.NewForConfig(config) + if err != nil { + return nil, err + } + return client.CertificatesV1beta1().CertificateSigningRequests(), nil + } + + return kubeletcertificate.NewKubeletClientCertificateManager( + certDir, + nodeName, + + // this preserves backwards compatibility with kubeadm which passes + // a high powered certificate to the kubelet as --kubeconfig and expects + // it to be rotated out immediately + clientConfig.CertData, + clientConfig.KeyData, + + clientConfig.CertFile, + clientConfig.KeyFile, + newClientFn, + ) +} + +func kubeClientConfigOverrides(s *options.KubeletServer, clientConfig *restclient.Config) { + clientConfig.ContentType = s.ContentType + // Override kubeconfig qps/burst settings from flags + clientConfig.QPS = float32(s.KubeAPIQPS) + clientConfig.Burst = int(s.KubeAPIBurst) +} + // getNodeName returns the node name according to the cloud provider // if cloud provider is specified. Otherwise, returns the hostname of the node. func getNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName, error) { @@ -859,39 +935,6 @@ func InitializeTLS(kf *options.KubeletFlags, kc *kubeletconfiginternal.KubeletCo return tlsOptions, nil } -func kubeconfigClientConfig(s *options.KubeletServer) (*restclient.Config, error) { - return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.KubeConfig}, - &clientcmd.ConfigOverrides{}, - ).ClientConfig() -} - -// createClientConfig creates a client configuration from the command line arguments. -// If --kubeconfig is explicitly set, it will be used. 
-func createClientConfig(s *options.KubeletServer) (*restclient.Config, error) { - if s.BootstrapKubeconfig != "" || len(s.KubeConfig) > 0 { - return kubeconfigClientConfig(s) - } else { - return nil, fmt.Errorf("createClientConfig called in standalone mode") - } -} - -// createAPIServerClientConfig generates a client.Config from command line flags -// via createClientConfig and then injects chaos into the configuration via addChaosToClientConfig. -func createAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config, error) { - clientConfig, err := createClientConfig(s) - if err != nil { - return nil, err - } - - clientConfig.ContentType = s.ContentType - // Override kubeconfig qps/burst settings from flags - clientConfig.QPS = float32(s.KubeAPIQPS) - clientConfig.Burst = int(s.KubeAPIBurst) - - return clientConfig, nil -} - // RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications: // 1 Integration tests // 2 Kubelet binary @@ -993,10 +1036,10 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie if _, err := k.RunOnce(podCfg.Updates()); err != nil { return fmt.Errorf("runonce failed: %v", err) } - klog.Infof("Started kubelet as runonce") + klog.Info("Started kubelet as runonce") } else { startKubelet(k, podCfg, &kubeServer.KubeletConfiguration, kubeDeps, kubeServer.EnableServer) - klog.Infof("Started kubelet") + klog.Info("Started kubelet") } return nil } diff --git a/cmd/kubelet/app/server_bootstrap_test.go b/cmd/kubelet/app/server_bootstrap_test.go new file mode 100644 index 00000000000..c59029fae86 --- /dev/null +++ b/cmd/kubelet/app/server_bootstrap_test.go @@ -0,0 +1,377 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "encoding/pem" + "io/ioutil" + "math/big" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "sync" + "testing" + "time" + + cfsslconfig "github.com/cloudflare/cfssl/config" + cfsslsigner "github.com/cloudflare/cfssl/signer" + cfssllocal "github.com/cloudflare/cfssl/signer/local" + + certapi "k8s.io/api/certificates/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + restclient "k8s.io/client-go/rest" + certutil "k8s.io/client-go/util/cert" +) + +// Test_buildClientCertificateManager validates that we can build a local client cert +// manager that will use the bootstrap client until we get a valid cert, then use our +// provided identity on subsequent requests. 
+func Test_buildClientCertificateManager(t *testing.T) { + testDir, err := ioutil.TempDir("", "kubeletcert") + if err != nil { + t.Fatal(err) + } + defer func() { os.RemoveAll(testDir) }() + + serverPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + serverCA, err := certutil.NewSelfSignedCACert(certutil.Config{ + CommonName: "the-test-framework", + }, serverPrivateKey) + if err != nil { + t.Fatal(err) + } + server := &csrSimulator{ + t: t, + serverPrivateKey: serverPrivateKey, + serverCA: serverCA, + } + s := httptest.NewServer(server) + defer s.Close() + + config1 := &restclient.Config{ + UserAgent: "FirstClient", + Host: s.URL, + } + config2 := &restclient.Config{ + UserAgent: "SecondClient", + Host: s.URL, + } + + nodeName := types.NodeName("test") + m, err := buildClientCertificateManager(config1, config2, testDir, nodeName) + if err != nil { + t.Fatal(err) + } + defer m.Stop() + r := m.(rotater) + + // get an expired CSR (simulating historical output) + server.backdate = 2 * time.Hour + server.expectUserAgent = "FirstClient" + ok, err := r.RotateCerts() + if !ok || err != nil { + t.Fatalf("unexpected rotation err: %t %v", ok, err) + } + if cert := m.Current(); cert != nil { + t.Fatalf("Unexpected cert, should be expired: %#v", cert) + } + fi := getFileInfo(testDir) + if len(fi) != 2 { + t.Fatalf("Unexpected directory contents: %#v", fi) + } + + // if m.Current() == nil, then we try again and get a valid + // client + server.backdate = 0 + server.expectUserAgent = "FirstClient" + if ok, err := r.RotateCerts(); !ok || err != nil { + t.Fatalf("unexpected rotation err: %t %v", ok, err) + } + if cert := m.Current(); cert == nil { + t.Fatalf("Unexpected cert, should be valid: %#v", cert) + } + fi = getFileInfo(testDir) + if len(fi) != 2 { + t.Fatalf("Unexpected directory contents: %#v", fi) + } + + // if m.Current() != nil, then we should use the second client + server.expectUserAgent = "SecondClient" + if ok, 
err := r.RotateCerts(); !ok || err != nil { + t.Fatalf("unexpected rotation err: %t %v", ok, err) + } + if cert := m.Current(); cert == nil { + t.Fatalf("Unexpected cert, should be valid: %#v", cert) + } + fi = getFileInfo(testDir) + if len(fi) != 2 { + t.Fatalf("Unexpected directory contents: %#v", fi) + } +} + +func Test_buildClientCertificateManager_populateCertDir(t *testing.T) { + testDir, err := ioutil.TempDir("", "kubeletcert") + if err != nil { + t.Fatal(err) + } + defer func() { os.RemoveAll(testDir) }() + + // when no cert is provided, write nothing to disk + config1 := &restclient.Config{ + UserAgent: "FirstClient", + Host: "http://localhost", + } + config2 := &restclient.Config{ + UserAgent: "SecondClient", + Host: "http://localhost", + } + nodeName := types.NodeName("test") + if _, err := buildClientCertificateManager(config1, config2, testDir, nodeName); err != nil { + t.Fatal(err) + } + fi := getFileInfo(testDir) + if len(fi) != 0 { + t.Fatalf("Unexpected directory contents: %#v", fi) + } + + // an invalid cert should be ignored + config2.CertData = []byte("invalid contents") + config2.KeyData = []byte("invalid contents") + if _, err := buildClientCertificateManager(config1, config2, testDir, nodeName); err == nil { + t.Fatal("unexpected non error") + } + fi = getFileInfo(testDir) + if len(fi) != 0 { + t.Fatalf("Unexpected directory contents: %#v", fi) + } + + // an expired client certificate should be written to disk, because the cert manager can + // use config1 to refresh it and the cert manager won't return it for clients. 
+ config2.CertData, config2.KeyData = genClientCert(t, time.Now().Add(-2*time.Hour), time.Now().Add(-time.Hour)) + if _, err := buildClientCertificateManager(config1, config2, testDir, nodeName); err != nil { + t.Fatal(err) + } + fi = getFileInfo(testDir) + if len(fi) != 2 { + t.Fatalf("Unexpected directory contents: %#v", fi) + } + + // a valid, non-expired client certificate should be written to disk + config2.CertData, config2.KeyData = genClientCert(t, time.Now().Add(-time.Hour), time.Now().Add(24*time.Hour)) + if _, err := buildClientCertificateManager(config1, config2, testDir, nodeName); err != nil { + t.Fatal(err) + } + fi = getFileInfo(testDir) + if len(fi) != 2 { + t.Fatalf("Unexpected directory contents: %#v", fi) + } + +} + +func getFileInfo(dir string) map[string]os.FileInfo { + fi := make(map[string]os.FileInfo) + filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if path == dir { + return nil + } + fi[path] = info + if !info.IsDir() { + os.Remove(path) + } + return nil + }) + return fi +} + +type rotater interface { + RotateCerts() (bool, error) +} + +func getCSR(req *http.Request) (*certapi.CertificateSigningRequest, error) { + if req.Body == nil { + return nil, nil + } + body, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + csr := &certapi.CertificateSigningRequest{} + if err := json.Unmarshal(body, csr); err != nil { + return nil, err + } + return csr, nil +} + +func mustMarshal(obj interface{}) []byte { + data, err := json.Marshal(obj) + if err != nil { + panic(err) + } + return data +} + +type csrSimulator struct { + t *testing.T + + serverPrivateKey *ecdsa.PrivateKey + serverCA *x509.Certificate + backdate time.Duration + + expectUserAgent string + + lock sync.Mutex + csr *certapi.CertificateSigningRequest +} + +func (s *csrSimulator) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.lock.Lock() + defer s.lock.Unlock() + t := s.t + + t.Logf("Request %s %s %s", req.Method, req.URL, 
req.UserAgent()) + + if len(s.expectUserAgent) > 0 && req.UserAgent() != s.expectUserAgent { + t.Errorf("Unexpected user agent: %s", req.UserAgent()) + } + + switch { + case req.Method == "POST" && req.URL.Path == "/apis/certificates.k8s.io/v1beta1/certificatesigningrequests": + csr, err := getCSR(req) + if err != nil { + t.Fatal(err) + } + if csr.Name == "" { + csr.Name = "test-csr" + } + + csr.UID = types.UID("1") + csr.ResourceVersion = "1" + data := mustMarshal(csr) + w.Header().Set("Content-Type", "application/json") + w.Write(data) + + csr = csr.DeepCopy() + csr.ResourceVersion = "2" + var usages []string + for _, usage := range csr.Spec.Usages { + usages = append(usages, string(usage)) + } + policy := &cfsslconfig.Signing{ + Default: &cfsslconfig.SigningProfile{ + Usage: usages, + Expiry: time.Hour, + ExpiryString: time.Hour.String(), + Backdate: s.backdate, + }, + } + cfs, err := cfssllocal.NewSigner(s.serverPrivateKey, s.serverCA, cfsslsigner.DefaultSigAlgo(s.serverPrivateKey), policy) + if err != nil { + t.Fatal(err) + } + csr.Status.Certificate, err = cfs.Sign(cfsslsigner.SignRequest{ + Request: string(csr.Spec.Request), + }) + if err != nil { + t.Fatal(err) + } + csr.Status.Conditions = []certapi.CertificateSigningRequestCondition{ + {Type: certapi.CertificateApproved}, + } + s.csr = csr + + case req.Method == "GET" && req.URL.Path == "/apis/certificates.k8s.io/v1beta1/certificatesigningrequests" && req.URL.RawQuery == "fieldSelector=metadata.name%3Dtest-csr&limit=500": + if s.csr == nil { + t.Fatalf("no csr") + } + csr := s.csr.DeepCopy() + + data := mustMarshal(&certapi.CertificateSigningRequestList{ + ListMeta: metav1.ListMeta{ + ResourceVersion: "2", + }, + Items: []certapi.CertificateSigningRequest{ + *csr, + }, + }) + w.Header().Set("Content-Type", "application/json") + w.Write(data) + + case req.Method == "GET" && req.URL.Path == "/apis/certificates.k8s.io/v1beta1/certificatesigningrequests" && req.URL.RawQuery == 
"fieldSelector=metadata.name%3Dtest-csr&resourceVersion=2&watch=true": + if s.csr == nil { + t.Fatalf("no csr") + } + csr := s.csr.DeepCopy() + + data := mustMarshal(&metav1.WatchEvent{ + Type: "ADDED", + Object: runtime.RawExtension{ + Raw: mustMarshal(csr), + }, + }) + w.Header().Set("Content-Type", "application/json") + w.Write(data) + + default: + t.Fatalf("unexpected request: %s %s", req.Method, req.URL) + } +} + +// genClientCert generates an x509 certificate for testing. Certificate and key +// are returned in PEM encoding. +func genClientCert(t *testing.T, from, to time.Time) ([]byte, []byte) { + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + keyRaw, err := x509.MarshalECPrivateKey(key) + if err != nil { + t.Fatal(err) + } + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + t.Fatal(err) + } + cert := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{Organization: []string{"Acme Co"}}, + NotBefore: from, + NotAfter: to, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + } + certRaw, err := x509.CreateCertificate(rand.Reader, cert, cert, key.Public(), key) + if err != nil { + t.Fatal(err) + } + return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certRaw}), + pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: keyRaw}) +} diff --git a/cmd/kubelet/app/server_linux.go b/cmd/kubelet/app/server_linux.go index f7c39d4cb7d..6f84afd9dfc 100644 --- a/cmd/kubelet/app/server_linux.go +++ b/cmd/kubelet/app/server_linux.go @@ -17,7 +17,7 @@ limitations under the License. 
package app import ( - "golang.org/x/exp/inotify" + "github.com/sigma/go-inotify" "k8s.io/klog" ) diff --git a/docs/.generated_docs b/docs/.generated_docs index 89a63ed631d..7c97583e830 100644 --- a/docs/.generated_docs +++ b/docs/.generated_docs @@ -127,7 +127,6 @@ docs/man/man1/kubeadm-config-images-list.1 docs/man/man1/kubeadm-config-images-pull.1 docs/man/man1/kubeadm-config-images.1 docs/man/man1/kubeadm-config-migrate.1 -docs/man/man1/kubeadm-config-print-default.1 docs/man/man1/kubeadm-config-print-init-defaults.1 docs/man/man1/kubeadm-config-print-join-defaults.1 docs/man/man1/kubeadm-config-print.1 diff --git a/docs/api-reference/storage.k8s.io/v1/definitions.html b/docs/api-reference/storage.k8s.io/v1/definitions.html index b0371e3bafa..bfa801f36f8 100755 --- a/docs/api-reference/storage.k8s.io/v1/definitions.html +++ b/docs/api-reference/storage.k8s.io/v1/definitions.html @@ -770,7 +770,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

message

-

String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.

+

String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.

false

string

diff --git a/docs/api-reference/storage.k8s.io/v1beta1/definitions.html b/docs/api-reference/storage.k8s.io/v1beta1/definitions.html index c8f95f8fbd9..8a64f53ee69 100755 --- a/docs/api-reference/storage.k8s.io/v1beta1/definitions.html +++ b/docs/api-reference/storage.k8s.io/v1beta1/definitions.html @@ -1190,7 +1190,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

message

-

String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.

+

String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.

false

string

diff --git a/docs/man/man1/kubeadm-config-print-default.1 b/docs/man/man1/kubeadm-config-print-default.1 deleted file mode 100644 index b6fd7a0f989..00000000000 --- a/docs/man/man1/kubeadm-config-print-default.1 +++ /dev/null @@ -1,3 +0,0 @@ -This file is autogenerated, but we've stopped checking such files into the -repository to reduce the need for rebases. Please run hack/generate-docs.sh to -populate this file. diff --git a/hack/.golint_failures b/hack/.golint_failures index f3879831d2f..a918ef88f02 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -1,4 +1,3 @@ - cluster/images/etcd-version-monitor cmd/cloud-controller-manager/app/apis/config/v1alpha1 cmd/hyperkube @@ -89,7 +88,6 @@ pkg/auth/authorizer/abac pkg/capabilities pkg/cloudprovider/providers/fake pkg/cloudprovider/providers/gce/cloud -pkg/cloudprovider/providers/ovirt pkg/cloudprovider/providers/photon pkg/cloudprovider/providers/vsphere pkg/cloudprovider/providers/vsphere/vclib @@ -138,15 +136,12 @@ pkg/kubeapiserver/options pkg/kubectl pkg/kubectl/apps pkg/kubectl/cmd/annotate -pkg/kubectl/cmd/apiresources pkg/kubectl/cmd/apply pkg/kubectl/cmd/attach -pkg/kubectl/cmd/auth pkg/kubectl/cmd/autoscale pkg/kubectl/cmd/certificates pkg/kubectl/cmd/clusterinfo pkg/kubectl/cmd/completion -pkg/kubectl/cmd/config pkg/kubectl/cmd/convert pkg/kubectl/cmd/cp pkg/kubectl/cmd/create @@ -176,9 +171,7 @@ pkg/kubectl/cmd/taint pkg/kubectl/cmd/testing pkg/kubectl/cmd/top pkg/kubectl/cmd/util -pkg/kubectl/cmd/util/editor pkg/kubectl/cmd/util/openapi -pkg/kubectl/cmd/util/sanity pkg/kubectl/cmd/version pkg/kubectl/cmd/wait pkg/kubectl/describe/versioned @@ -394,6 +387,7 @@ pkg/version/verflag pkg/volume pkg/volume/azure_dd pkg/volume/azure_file +pkg/volume/csi/csiv0 pkg/volume/csi/fake pkg/volume/git_repo pkg/volume/host_path @@ -746,14 +740,6 @@ test/e2e_node/environment test/e2e_node/remote test/e2e_node/runner/remote test/e2e_node/services -test/images/net/nat -test/images/netexec -test/images/nettest 
-test/images/no-snat-test -test/images/no-snat-test-proxy -test/images/resource-consumer -test/images/resource-consumer/common -test/images/resource-consumer/controller test/integration test/integration/auth test/integration/evictions diff --git a/hack/import-restrictions.yaml b/hack/import-restrictions.yaml index 3ae27381c33..9e56ad61c68 100644 --- a/hack/import-restrictions.yaml +++ b/hack/import-restrictions.yaml @@ -171,3 +171,12 @@ - k8s.io/apimachinery - k8s.io/client-go - k8s.io/klog + +- baseImportPath: "./vendor/k8s.io/csi-api/" + allowedImports: + - k8s.io/api + - k8s.io/apimachinery + - k8s.io/apiextensions-apiserver + - k8s.io/client-go + - k8s.io/csi-api + - k8s.io/klog diff --git a/hack/lib/etcd.sh b/hack/lib/etcd.sh index 33d8e07a988..b853e5d7e57 100755 --- a/hack/lib/etcd.sh +++ b/hack/lib/etcd.sh @@ -16,7 +16,7 @@ # A set of helpers for starting/running etcd for tests -ETCD_VERSION=${ETCD_VERSION:-3.2.24} +ETCD_VERSION=${ETCD_VERSION:-3.3.10} ETCD_HOST=${ETCD_HOST:-127.0.0.1} ETCD_PORT=${ETCD_PORT:-2379} export KUBE_INTEGRATION_ETCD_URL="http://${ETCD_HOST}:${ETCD_PORT}" diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 7ddc0bb79bf..809569d50e1 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -354,7 +354,15 @@ kube::golang::create_gopath_tree() { ln -snf "${KUBE_ROOT}" "${go_pkg_dir}" fi - cat >"${KUBE_GOPATH}/BUILD" <"${KUBE_GOPATH}/BUILD.bazel" </dev/null) || true +if [[ -n "${direct_sets}" ]]; then + echo "Test files may not access mutable global feature gates directly:" >&2 + echo "${direct_sets}" >&2 + echo >&2 + echo "Use this invocation instead:" >&2 + echo " defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., )()" >&2 + echo >&2 + rc=1 +fi + +# find test files calling SetFeatureGateDuringTest and not calling the result +missing_defers=$(grep -n --include *_test.go -R 'SetFeatureGateDuringTest' . 
2>/dev/null | egrep -v "defer .*\\)\\(\\)$") || true +if [[ -n "${missing_defers}" ]]; then + echo "Invalid invocations of utilfeaturetesting.SetFeatureGateDuringTest():" >&2 + echo "${missing_defers}" >&2 + echo >&2 + echo "Always make a deferred call to the returned function to ensure the feature gate is reset:" >&2 + echo " defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., )()" >&2 + echo >&2 + rc=1 +fi + +exit $rc diff --git a/pkg/api/persistentvolumeclaim/BUILD b/pkg/api/persistentvolumeclaim/BUILD index fba0dfd6598..3a72b78c8f0 100644 --- a/pkg/api/persistentvolumeclaim/BUILD +++ b/pkg/api/persistentvolumeclaim/BUILD @@ -32,10 +32,7 @@ filegroup( go_test( name = "go_default_test", - srcs = [ - "main_test.go", - "util_test.go", - ], + srcs = ["util_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/pkg/api/persistentvolumeclaim/main_test.go b/pkg/api/persistentvolumeclaim/main_test.go deleted file mode 100644 index 3ce0a93c8c1..00000000000 --- a/pkg/api/persistentvolumeclaim/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package persistentvolumeclaim - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/api/pod/BUILD b/pkg/api/pod/BUILD index 5721392c09a..2da65ee0c57 100644 --- a/pkg/api/pod/BUILD +++ b/pkg/api/pod/BUILD @@ -33,10 +33,7 @@ filegroup( go_test( name = "go_default_test", - srcs = [ - "main_test.go", - "util_test.go", - ], + srcs = ["util_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/pkg/api/pod/main_test.go b/pkg/api/pod/main_test.go deleted file mode 100644 index 4fb45213978..00000000000 --- a/pkg/api/pod/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package pod - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/api/podsecuritypolicy/BUILD b/pkg/api/podsecuritypolicy/BUILD index 4d566715fb7..92c5c73c67b 100644 --- a/pkg/api/podsecuritypolicy/BUILD +++ b/pkg/api/podsecuritypolicy/BUILD @@ -32,10 +32,7 @@ filegroup( go_test( name = "go_default_test", - srcs = [ - "main_test.go", - "util_test.go", - ], + srcs = ["util_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/pkg/api/podsecuritypolicy/main_test.go b/pkg/api/podsecuritypolicy/main_test.go deleted file mode 100644 index f379a10cfbe..00000000000 --- a/pkg/api/podsecuritypolicy/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package podsecuritypolicy - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/api/podsecuritypolicy/util_test.go b/pkg/api/podsecuritypolicy/util_test.go index 420e73b77c7..5fe4e52662f 100644 --- a/pkg/api/podsecuritypolicy/util_test.go +++ b/pkg/api/podsecuritypolicy/util_test.go @@ -20,6 +20,7 @@ import ( "testing" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/features" @@ -34,10 +35,7 @@ func TestDropAlphaProcMountType(t *testing.T) { } // Enable alpha feature ProcMountType - err1 := utilfeature.DefaultFeatureGate.Set("ProcMountType=true") - if err1 != nil { - t.Fatalf("Failed to enable feature gate for ProcMountType: %v", err1) - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProcMountType, true)() // now test dropping the fields - should not be dropped DropDisabledAlphaFields(&psp.Spec) @@ -51,10 +49,7 @@ func TestDropAlphaProcMountType(t *testing.T) { } // Disable alpha feature ProcMountType - err := utilfeature.DefaultFeatureGate.Set("ProcMountType=false") - if err != nil { - t.Fatalf("Failed to disable feature gate for ProcMountType: %v", err) - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProcMountType, false)() // now test dropping the fields DropDisabledAlphaFields(&psp.Spec) diff --git a/pkg/api/testing/serialization_test.go b/pkg/api/testing/serialization_test.go index 4b3fa2d95d8..600728439f0 100644 --- a/pkg/api/testing/serialization_test.go +++ b/pkg/api/testing/serialization_test.go @@ -86,9 +86,11 @@ func 
TestSetControllerConversion(t *testing.T) { rs := &apps.ReplicaSet{} rc := &api.ReplicationController{} + extGroup := schema.GroupVersion{Group: "apps", Version: "v1"} + extCodec := legacyscheme.Codecs.LegacyCodec(extGroup) - extGroup := testapi.Apps - defaultGroup := testapi.Default + defaultGroup := schema.GroupVersion{Group: "", Version: "v1"} + defaultCodec := legacyscheme.Codecs.LegacyCodec(defaultGroup) fuzzInternalObject(t, schema.GroupVersion{Group: "apps", Version: runtime.APIVersionInternal}, rs, rand.Int63()) @@ -102,7 +104,7 @@ func TestSetControllerConversion(t *testing.T) { } t.Logf("rs._internal.apps -> rs.v1.apps") - data, err := runtime.Encode(extGroup.Codec(), rs) + data, err := runtime.Encode(extCodec, rs) if err != nil { t.Fatalf("unexpected encoding error: %v", err) } @@ -110,9 +112,9 @@ func TestSetControllerConversion(t *testing.T) { decoder := legacyscheme.Codecs.DecoderToVersion( legacyscheme.Codecs.UniversalDeserializer(), runtime.NewMultiGroupVersioner( - *defaultGroup.GroupVersion(), - schema.GroupKind{Group: defaultGroup.GroupVersion().Group}, - schema.GroupKind{Group: extGroup.GroupVersion().Group}, + defaultGroup, + schema.GroupKind{Group: defaultGroup.Group}, + schema.GroupKind{Group: extGroup.Group}, ), ) @@ -122,7 +124,7 @@ func TestSetControllerConversion(t *testing.T) { } t.Logf("rc._internal -> rc.v1") - data, err = runtime.Encode(defaultGroup.Codec(), rc) + data, err = runtime.Encode(defaultCodec, rc) if err != nil { t.Fatalf("unexpected encoding error: %v", err) } diff --git a/pkg/apis/apps/validation/validation.go b/pkg/apis/apps/validation/validation.go index 7cdf4c2dc27..a80c0a2093b 100644 --- a/pkg/apis/apps/validation/validation.go +++ b/pkg/apis/apps/validation/validation.go @@ -127,7 +127,7 @@ func ValidateStatefulSetSpec(spec *apps.StatefulSetSpec, fldPath *field.Path) fi allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, 
[]string{string(api.RestartPolicyAlways)})) } if spec.Template.Spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "spec", "activeDeadlineSeconds"), spec.Template.Spec.ActiveDeadlineSeconds, "must not be specified")) + allErrs = append(allErrs, field.Forbidden(fldPath.Child("template", "spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in StatefulSet is not Supported")) } return allErrs @@ -327,7 +327,7 @@ func ValidateDaemonSetSpec(spec *apps.DaemonSetSpec, fldPath *field.Path) field. allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) } if spec.Template.Spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "spec", "activeDeadlineSeconds"), spec.Template.Spec.ActiveDeadlineSeconds, "must not be specified")) + allErrs = append(allErrs, field.Forbidden(fldPath.Child("template", "spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in DaemonSet is not Supported")) } allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.TemplateGeneration), fldPath.Child("templateGeneration"))...) 
@@ -664,7 +664,7 @@ func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selecto allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) } if template.Spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified")) + allErrs = append(allErrs, field.Forbidden(fldPath.Child("spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in ReplicaSet is not Supported")) } } return allErrs diff --git a/pkg/apis/autoscaling/v2beta1/BUILD b/pkg/apis/autoscaling/v2beta1/BUILD index 47d21ddb94e..5fbc372b458 100644 --- a/pkg/apis/autoscaling/v2beta1/BUILD +++ b/pkg/apis/autoscaling/v2beta1/BUILD @@ -26,7 +26,10 @@ go_library( go_test( name = "go_default_test", - srcs = ["defaults_test.go"], + srcs = [ + "conversion_test.go", + "defaults_test.go", + ], embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", @@ -37,6 +40,7 @@ go_test( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/pkg/apis/autoscaling/v2beta1/conversion.go b/pkg/apis/autoscaling/v2beta1/conversion.go index eebc1043053..142cf726493 100644 --- a/pkg/apis/autoscaling/v2beta1/conversion.go +++ b/pkg/apis/autoscaling/v2beta1/conversion.go @@ -185,8 +185,10 @@ func Convert_v2beta1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *au } func Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv2beta1.PodsMetricSource, s conversion.Scope) error { - targetAverageValue := *in.Target.AverageValue - 
out.TargetAverageValue = targetAverageValue + if in.Target.AverageValue != nil { + targetAverageValue := *in.Target.AverageValue + out.TargetAverageValue = targetAverageValue + } out.MetricName = in.Metric.Name out.Selector = in.Metric.Selector @@ -247,8 +249,10 @@ func Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(in *au } out.MetricName = in.Metric.Name out.Selector = in.Metric.Selector - currentAverageValue := *in.Current.AverageValue - out.AverageValue = &currentAverageValue + if in.Current.AverageValue != nil { + currentAverageValue := *in.Current.AverageValue + out.AverageValue = &currentAverageValue + } return nil } diff --git a/pkg/apis/autoscaling/v2beta1/conversion_test.go b/pkg/apis/autoscaling/v2beta1/conversion_test.go new file mode 100644 index 00000000000..4a1db1a1537 --- /dev/null +++ b/pkg/apis/autoscaling/v2beta1/conversion_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2beta1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "k8s.io/api/autoscaling/v2beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/apis/autoscaling" +) + +// Testing nil pointer panic uncovered by #70806 +// TODO(yue9944882): Test nil/empty conversion across all resource types +func TestNilOrEmptyConversion(t *testing.T) { + scheme := runtime.NewScheme() + assert.NoError(t, addConversionFuncs(scheme)) + + testCases := []struct { + obj1 interface{} + obj2 interface{} + }{ + { + obj1: &autoscaling.ExternalMetricSource{}, + obj2: &v2beta1.ExternalMetricSource{}, + }, + { + obj1: &autoscaling.ExternalMetricStatus{}, + obj2: &v2beta1.ExternalMetricStatus{}, + }, + { + obj1: &autoscaling.PodsMetricSource{}, + obj2: &v2beta1.PodsMetricSource{}, + }, + { + obj1: &autoscaling.PodsMetricStatus{}, + obj2: &v2beta1.PodsMetricStatus{}, + }, + { + obj1: &autoscaling.ObjectMetricSource{}, + obj2: &v2beta1.ObjectMetricSource{}, + }, + { + obj1: &autoscaling.ObjectMetricStatus{}, + obj2: &v2beta1.ObjectMetricStatus{}, + }, + { + obj1: &autoscaling.ResourceMetricSource{}, + obj2: &v2beta1.ResourceMetricSource{}, + }, + { + obj1: &autoscaling.ResourceMetricStatus{}, + obj2: &v2beta1.ResourceMetricStatus{}, + }, + { + obj1: &autoscaling.HorizontalPodAutoscaler{}, + obj2: &v2beta1.HorizontalPodAutoscaler{}, + }, + { + obj1: &autoscaling.MetricTarget{}, + obj2: &v2beta1.CrossVersionObjectReference{}, + }, + } + for _, testCase := range testCases { + assert.NoError(t, scheme.Convert(testCase.obj1, testCase.obj2, nil)) + assert.NoError(t, scheme.Convert(testCase.obj2, testCase.obj1, nil)) + } +} diff --git a/pkg/apis/batch/validation/BUILD b/pkg/apis/batch/validation/BUILD index 4a29be29ae2..f9a72f4ed17 100644 --- a/pkg/apis/batch/validation/BUILD +++ b/pkg/apis/batch/validation/BUILD @@ -27,10 +27,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "main_test.go", - "validation_test.go", - ], + srcs = 
["validation_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/batch:go_default_library", diff --git a/pkg/apis/batch/validation/main_test.go b/pkg/apis/batch/validation/main_test.go deleted file mode 100644 index ad488a1caaa..00000000000 --- a/pkg/apis/batch/validation/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/apis/batch/validation/validation_test.go b/pkg/apis/batch/validation/validation_test.go index 7ff9e3f73ea..5300193233e 100644 --- a/pkg/apis/batch/validation/validation_test.go +++ b/pkg/apis/batch/validation/validation_test.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" @@ -74,14 +75,6 @@ func featureToggle(feature utilfeature.Feature) []string { } func TestValidateJob(t *testing.T) { - ttlEnabled := 
utilfeature.DefaultFeatureGate.Enabled(features.TTLAfterFinished) - defer func() { - err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TTLAfterFinished, ttlEnabled)) - if err != nil { - t.Fatalf("Failed to set feature gate for %s: %v", features.TTLAfterFinished, err) - } - }() - validManualSelector := getValidManualSelector() validPodTemplateSpecForManual := getValidPodTemplateSpecForManual(validManualSelector) validGeneratedSelector := getValidGeneratedSelector() @@ -231,11 +224,8 @@ func TestValidateJob(t *testing.T) { }, } - for _, setFeature := range featureToggle(features.TTLAfterFinished) { - // Set error cases based on if TTLAfterFinished feature is enabled or not - if err := utilfeature.DefaultFeatureGate.Set(setFeature); err != nil { - t.Fatalf("Failed to set feature gate for %s: %v", features.TTLAfterFinished, err) - } + for _, setFeature := range []bool{true, false} { + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TTLAfterFinished, setFeature)() ttlCase := "spec.ttlSecondsAfterFinished:must be greater than or equal to 0" if utilfeature.DefaultFeatureGate.Enabled(features.TTLAfterFinished) { errorCases[ttlCase] = batch.Job{ diff --git a/pkg/apis/core/v1/BUILD b/pkg/apis/core/v1/BUILD index b7e881dd95c..e52a3e4f907 100644 --- a/pkg/apis/core/v1/BUILD +++ b/pkg/apis/core/v1/BUILD @@ -36,7 +36,6 @@ go_test( srcs = [ "conversion_test.go", "defaults_test.go", - "main_test.go", ], embed = [":go_default_library"], deps = [ diff --git a/pkg/apis/core/validation/BUILD b/pkg/apis/core/validation/BUILD index a815e5b0ac7..ed546217cf6 100644 --- a/pkg/apis/core/validation/BUILD +++ b/pkg/apis/core/validation/BUILD @@ -47,7 +47,6 @@ go_test( name = "go_default_test", srcs = [ "events_test.go", - "main_test.go", "validation_test.go", ], embed = [":go_default_library"], diff --git a/pkg/apis/core/validation/main_test.go b/pkg/apis/core/validation/main_test.go deleted file mode 100644 index 
ad488a1caaa..00000000000 --- a/pkg/apis/core/validation/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 6f0302c37fa..4643720eddd 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -4062,7 +4062,7 @@ func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap ma allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(core.RestartPolicyAlways)})) } if template.Spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified")) + allErrs = append(allErrs, field.Forbidden(fldPath.Child("spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in ReplicationController is not Supported")) } } return allErrs diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 
94bff049cda..1c53800f704 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -18,7 +18,6 @@ package validation import ( "bytes" - "fmt" "math" "reflect" "strings" @@ -784,11 +783,7 @@ func TestAlphaVolumeSnapshotDataSource(t *testing.T) { } // Enable alpha feature VolumeSnapshotDataSource - err := utilfeature.DefaultFeatureGate.Set("VolumeSnapshotDataSource=true") - if err != nil { - t.Errorf("Failed to enable feature gate for VolumeSnapshotDataSource: %v", err) - return - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSnapshotDataSource, true)() for _, tc := range successTestCases { if errs := ValidatePersistentVolumeClaimSpec(&tc, field.NewPath("spec")); len(errs) != 0 { t.Errorf("expected success: %v", errs) @@ -800,11 +795,7 @@ func TestAlphaVolumeSnapshotDataSource(t *testing.T) { } } // Disable alpha feature VolumeSnapshotDataSource - err = utilfeature.DefaultFeatureGate.Set("VolumeSnapshotDataSource=false") - if err != nil { - t.Errorf("Failed to disable feature gate for VolumeSnapshotDataSource: %v", err) - return - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSnapshotDataSource, false)() for _, tc := range successTestCases { if errs := ValidatePersistentVolumeClaimSpec(&tc, field.NewPath("spec")); len(errs) == 0 { t.Errorf("expected failure: %v", errs) @@ -4897,8 +4888,7 @@ func TestValidateVolumeMounts(t *testing.T) { } func TestValidateDisabledSubpath(t *testing.T) { - utilfeature.DefaultFeatureGate.Set("VolumeSubpath=false") - defer utilfeature.DefaultFeatureGate.Set("VolumeSubpath=true") + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpath, false)() volumes := []core.Volume{ {Name: "abc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim1"}}}, @@ -5734,16 
+5724,7 @@ func TestValidateRestartPolicy(t *testing.T) { } func TestValidateDNSPolicy(t *testing.T) { - customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS") - defer func() { - // Restoring the old value. - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil { - t.Errorf("Failed to restore CustomPodDNS feature gate: %v", err) - } - }() - if err := utilfeature.DefaultFeatureGate.Set("CustomPodDNS=true"); err != nil { - t.Errorf("Failed to enable CustomPodDNS feature gate: %v", err) - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, true)() successCases := []core.DNSPolicy{core.DNSClusterFirst, core.DNSDefault, core.DNSPolicy(core.DNSClusterFirst), core.DNSNone} for _, policy := range successCases { @@ -5761,16 +5742,7 @@ func TestValidateDNSPolicy(t *testing.T) { } func TestValidatePodDNSConfig(t *testing.T) { - customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS") - defer func() { - // Restoring the old value. - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil { - t.Errorf("Failed to restore CustomPodDNS feature gate: %v", err) - } - }() - if err := utilfeature.DefaultFeatureGate.Set("CustomPodDNS=true"); err != nil { - t.Errorf("Failed to enable CustomPodDNS feature gate: %v", err) - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, true)() generateTestSearchPathFunc := func(numChars int) string { res := "" @@ -5932,16 +5904,7 @@ func TestValidatePodDNSConfig(t *testing.T) { } func TestValidatePodReadinessGates(t *testing.T) { - podReadinessGatesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodReadinessGates) - defer func() { - // Restoring the old value. 
- if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%v", features.PodReadinessGates, podReadinessGatesEnabled)); err != nil { - t.Errorf("Failed to restore PodReadinessGates feature gate: %v", err) - } - }() - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodReadinessGates)); err != nil { - t.Errorf("Failed to enable PodReadinessGates feature gate: %v", err) - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadinessGates, true)() successCases := []struct { desc string @@ -6420,8 +6383,7 @@ func TestValidatePodSpec(t *testing.T) { } } - // original value will be restored by previous defer - utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodShareProcessNamespace, false) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodShareProcessNamespace, false)() featuregatedCases := map[string]core.PodSpec{ "set ShareProcessNamespace": { diff --git a/pkg/apis/storage/types.go b/pkg/apis/storage/types.go index 7dedc666e18..f8c16a74505 100644 --- a/pkg/apis/storage/types.go +++ b/pkg/apis/storage/types.go @@ -195,7 +195,7 @@ type VolumeError struct { Time metav1.Time // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. 
// +optional Message string diff --git a/pkg/apis/storage/util/BUILD b/pkg/apis/storage/util/BUILD index 0cddfad64b4..1b37c7f631d 100644 --- a/pkg/apis/storage/util/BUILD +++ b/pkg/apis/storage/util/BUILD @@ -36,10 +36,7 @@ filegroup( go_test( name = "go_default_test", - srcs = [ - "main_test.go", - "util_test.go", - ], + srcs = ["util_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/pkg/apis/storage/v1/BUILD b/pkg/apis/storage/v1/BUILD index 11449ae5f12..05bd5565a79 100644 --- a/pkg/apis/storage/v1/BUILD +++ b/pkg/apis/storage/v1/BUILD @@ -47,10 +47,7 @@ filegroup( go_test( name = "go_default_test", - srcs = [ - "defaults_test.go", - "main_test.go", - ], + srcs = ["defaults_test.go"], embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", diff --git a/pkg/apis/storage/v1/main_test.go b/pkg/apis/storage/v1/main_test.go deleted file mode 100644 index e46b01929f7..00000000000 --- a/pkg/apis/storage/v1/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/apis/storage/v1beta1/BUILD b/pkg/apis/storage/v1beta1/BUILD index 4695a2ddb21..7be33870151 100644 --- a/pkg/apis/storage/v1beta1/BUILD +++ b/pkg/apis/storage/v1beta1/BUILD @@ -47,10 +47,7 @@ filegroup( go_test( name = "go_default_test", - srcs = [ - "defaults_test.go", - "main_test.go", - ], + srcs = ["defaults_test.go"], embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", diff --git a/pkg/apis/storage/v1beta1/main_test.go b/pkg/apis/storage/v1beta1/main_test.go deleted file mode 100644 index 4613d1156c4..00000000000 --- a/pkg/apis/storage/v1beta1/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/apis/storage/validation/BUILD b/pkg/apis/storage/validation/BUILD index fec90d3adcb..a91f11d93e1 100644 --- a/pkg/apis/storage/validation/BUILD +++ b/pkg/apis/storage/validation/BUILD @@ -26,10 +26,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "main_test.go", - "validation_test.go", - ], + srcs = ["validation_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/pkg/apis/storage/validation/main_test.go b/pkg/apis/storage/validation/main_test.go deleted file mode 100644 index ad488a1caaa..00000000000 --- a/pkg/apis/storage/validation/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/cloudprovider/providers/aws/BUILD b/pkg/cloudprovider/providers/aws/BUILD index ca34b569b24..755d731a0fe 100644 --- a/pkg/cloudprovider/providers/aws/BUILD +++ b/pkg/cloudprovider/providers/aws/BUILD @@ -78,6 +78,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/kubelet/apis:go_default_library", + "//pkg/volume:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index d4f38dc75f4..97a6a4fd577 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -261,9 +261,6 @@ const DefaultVolumeType = "gp2" // Used to call recognizeWellKnownRegions just once var once sync.Once -// AWS implements PVLabeler. -var _ cloudprovider.PVLabeler = (*Cloud)(nil) - // Services is an abstraction over AWS, to allow mocking/other implementations type Services interface { Compute(region string) (EC2, error) @@ -480,6 +477,13 @@ type InstanceGroupInfo interface { CurrentSize() (int, error) } +var _ cloudprovider.Interface = (*Cloud)(nil) +var _ cloudprovider.Instances = (*Cloud)(nil) +var _ cloudprovider.LoadBalancer = (*Cloud)(nil) +var _ cloudprovider.Routes = (*Cloud)(nil) +var _ cloudprovider.Zones = (*Cloud)(nil) +var _ cloudprovider.PVLabeler = (*Cloud)(nil) + // Cloud is an implementation of Interface, LoadBalancer and Instances for Amazon Web Services. 
type Cloud struct { ec2 EC2 @@ -2324,6 +2328,11 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) // GetLabelsForVolume gets the volume labels for a volume func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { + // Ignore if not AWSElasticBlockStore. + if pv.Spec.AWSElasticBlockStore == nil { + return nil, nil + } + // Ignore any volumes that are being provisioned if pv.Spec.AWSElasticBlockStore.VolumeID == volume.ProvisionedVolumeName { return nil, nil diff --git a/pkg/cloudprovider/providers/aws/aws_fakes.go b/pkg/cloudprovider/providers/aws/aws_fakes.go index 28946fa35c0..844a3a0c0af 100644 --- a/pkg/cloudprovider/providers/aws/aws_fakes.go +++ b/pkg/cloudprovider/providers/aws/aws_fakes.go @@ -353,10 +353,10 @@ func (m *FakeMetadata) GetMetadata(key string) (string, error) { } } - return "", nil - } else { return "", nil } + + return "", nil } // FakeELB is a fake ELB client used for testing diff --git a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index 0c2e7553067..3242a057bc3 100644 --- a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -569,12 +569,17 @@ func filterForIPRangeDescription(securityGroups []*ec2.SecurityGroup, lbName str response := []*ec2.SecurityGroup{} clientRule := fmt.Sprintf("%s=%s", NLBClientRuleDescription, lbName) healthRule := fmt.Sprintf("%s=%s", NLBHealthCheckRuleDescription, lbName) + alreadyAdded := sets.NewString() for i := range securityGroups { for j := range securityGroups[i].IpPermissions { for k := range securityGroups[i].IpPermissions[j].IpRanges { description := aws.StringValue(securityGroups[i].IpPermissions[j].IpRanges[k].Description) if description == clientRule || description == healthRule { - response = append(response, securityGroups[i]) + sgIDString := aws.StringValue(securityGroups[i].GroupId) + if 
!alreadyAdded.Has(sgIDString) { + response = append(response, securityGroups[i]) + alreadyAdded.Insert(sgIDString) + } } } } @@ -599,6 +604,7 @@ func (c *Cloud) getVpcCidrBlock() (*string, error) { // if clientTraffic is false, then only update HealthCheck rules func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.SecurityGroup, desiredSgIds []string, ports []int64, lbName string, clientCidrs []string, clientTraffic bool) error { + klog.V(8).Infof("updateInstanceSecurityGroupsForNLBTraffic: actualGroups=%v, desiredSgIds=%v, ports=%v, clientTraffic=%v", actualGroups, desiredSgIds, ports, clientTraffic) // Map containing the groups we want to make changes on; the ports to make // changes on; and whether to add or remove it. true to add, false to remove portChanges := map[string]map[int64]bool{} @@ -653,16 +659,16 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se if add { if clientTraffic { klog.V(2).Infof("Adding rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) - klog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s), port (%v)", clientCidrs, instanceSecurityGroupID, port) } else { - klog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s), port (%v)", clientCidrs, instanceSecurityGroupID, port) } } else { if clientTraffic { klog.V(2).Infof("Removing rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) - klog.V(2).Infof("Removing rule for client traffic from the network load 
balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for client traffic from the network load balancer (%s) to instance (%s), port (%v)", clientCidrs, instanceSecurityGroupID, port) } - klog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to instance (%s), port (%v)", clientCidrs, instanceSecurityGroupID, port) } if clientTraffic { diff --git a/pkg/cloudprovider/providers/aws/aws_loadbalancer_test.go b/pkg/cloudprovider/providers/aws/aws_loadbalancer_test.go index 9f81ea75cfa..cd5d79ba7a9 100644 --- a/pkg/cloudprovider/providers/aws/aws_loadbalancer_test.go +++ b/pkg/cloudprovider/providers/aws/aws_loadbalancer_test.go @@ -17,9 +17,11 @@ limitations under the License. package aws import ( + "fmt" "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" ) func TestElbProtocolsAreEqual(t *testing.T) { @@ -160,3 +162,63 @@ func TestIsNLB(t *testing.T) { } } } + +func TestSecurityGroupFiltering(t *testing.T) { + grid := []struct { + in []*ec2.SecurityGroup + name string + expected int + description string + }{ + { + in: []*ec2.SecurityGroup{ + { + IpPermissions: []*ec2.IpPermission{ + { + IpRanges: []*ec2.IpRange{ + { + Description: aws.String("an unmanaged"), + }, + }, + }, + }, + }, + }, + name: "unmanaged", + expected: 0, + description: "An environment without managed LBs should have %d, but found %d SecurityGroups", + }, + { + in: []*ec2.SecurityGroup{ + { + IpPermissions: []*ec2.IpPermission{ + { + IpRanges: []*ec2.IpRange{ + { + Description: aws.String("an unmanaged"), + }, + { + Description: aws.String(fmt.Sprintf("%s=%s", NLBClientRuleDescription, "managedlb")), + }, + { + Description: aws.String(fmt.Sprintf("%s=%s", NLBHealthCheckRuleDescription, "managedlb")), + }, + }, + }, + }, + }, + }, + 
name: "managedlb", + expected: 1, + description: "Found %d, but should have %d Security Groups", + }, + } + + for _, g := range grid { + actual := len(filterForIPRangeDescription(g.in, g.name)) + if actual != g.expected { + t.Errorf(g.description, actual, g.expected) + } + } + +} diff --git a/pkg/cloudprovider/providers/aws/aws_test.go b/pkg/cloudprovider/providers/aws/aws_test.go index 50e303d43d3..4d12932859b 100644 --- a/pkg/cloudprovider/providers/aws/aws_test.go +++ b/pkg/cloudprovider/providers/aws/aws_test.go @@ -36,6 +36,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + "k8s.io/kubernetes/pkg/volume" ) const TestClusterID = "clusterid.test" @@ -912,6 +913,98 @@ func TestGetVolumeLabels(t *testing.T) { awsServices.ec2.(*MockedFakeEC2).AssertExpectations(t) } +func TestGetLabelsForVolume(t *testing.T) { + defaultVolume := EBSVolumeID("vol-VolumeId").awsString() + tests := []struct { + name string + pv *v1.PersistentVolume + expectedVolumeID *string + expectedEC2Volumes []*ec2.Volume + expectedLabels map[string]string + expectedError error + }{ + { + "not an EBS volume", + &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{}, + }, + nil, + nil, + nil, + nil, + }, + { + "volume which is being provisioned", + &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: volume.ProvisionedVolumeName, + }, + }, + }, + }, + nil, + nil, + nil, + nil, + }, + { + "no volumes found", + &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "vol-VolumeId", + }, + }, + }, + }, + defaultVolume, + nil, + nil, + fmt.Errorf("no volumes found"), + }, + { + "correct labels for volume", + &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + 
PersistentVolumeSource: v1.PersistentVolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "vol-VolumeId", + }, + }, + }, + }, + defaultVolume, + []*ec2.Volume{{ + VolumeId: defaultVolume, + AvailabilityZone: aws.String("us-east-1a"), + }}, + map[string]string{ + kubeletapis.LabelZoneFailureDomain: "us-east-1a", + kubeletapis.LabelZoneRegion: "us-east-1", + }, + nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + awsServices := newMockedFakeAWSServices(TestClusterID) + expectedVolumeRequest := &ec2.DescribeVolumesInput{VolumeIds: []*string{test.expectedVolumeID}} + awsServices.ec2.(*MockedFakeEC2).On("DescribeVolumes", expectedVolumeRequest).Return(test.expectedEC2Volumes) + + c, err := newAWSCloud(CloudConfig{}, awsServices) + assert.Nil(t, err, "Error building aws cloud: %v", err) + + l, err := c.GetLabelsForVolume(context.TODO(), test.pv) + assert.Equal(t, test.expectedLabels, l) + assert.Equal(t, test.expectedError, err) + }) + + } +} + func TestDescribeLoadBalancerOnDelete(t *testing.T) { awsServices := newMockedFakeAWSServices(TestClusterID) c, _ := newAWSCloud(CloudConfig{}, awsServices) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index e2205c37fbe..a64e5730c42 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -71,9 +71,6 @@ var ( defaultExcludeMasterFromStandardLB = true ) -// Azure implements PVLabeler. 
-var _ cloudprovider.PVLabeler = (*Cloud)(nil) - // Config holds the configuration parsed from the --cloud-config flag // All fields are required unless otherwise specified type Config struct { @@ -143,6 +140,13 @@ type Config struct { MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount" yaml:"maximumLoadBalancerRuleCount"` } +var _ cloudprovider.Interface = (*Cloud)(nil) +var _ cloudprovider.Instances = (*Cloud)(nil) +var _ cloudprovider.LoadBalancer = (*Cloud)(nil) +var _ cloudprovider.Routes = (*Cloud)(nil) +var _ cloudprovider.Zones = (*Cloud)(nil) +var _ cloudprovider.PVLabeler = (*Cloud)(nil) + // Cloud holds the config and clients type Cloud struct { Config diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index 9d0be714894..7cb8a756df7 100644 --- a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -108,7 +108,7 @@ func (c *BlobDiskController) DeleteVolume(diskURI string) error { if err != nil { return fmt.Errorf("failed to parse vhd URI %v", err) } - key, err := c.common.cloud.getStorageAccesskey(accountName, c.common.resourceGroup) + key, err := c.common.cloud.GetStorageAccesskey(accountName, c.common.resourceGroup) if err != nil { return fmt.Errorf("no key for storage account %s, err %v", accountName, err) } diff --git a/pkg/cloudprovider/providers/azure/azure_controller_standard.go b/pkg/cloudprovider/providers/azure/azure_controller_standard.go index 505b19af8ec..f600209e284 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_standard.go @@ -68,6 +68,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri newVM := compute.VirtualMachine{ Location: vm.Location, VirtualMachineProperties: &compute.VirtualMachineProperties{ + HardwareProfile: vm.HardwareProfile, 
StorageProfile: &compute.StorageProfile{ DataDisks: &disks, }, @@ -76,7 +77,12 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, vmName, diskName) ctx, cancel := getContextWithCancel() defer cancel() - if _, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM); err != nil { + + // Invalidate the cache right after updating + defer as.cloud.vmCache.Delete(vmName) + + _, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) + if err != nil { klog.Errorf("azureDisk - attach disk(%s) failed, err: %v", diskName, err) detail := err.Error() if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { @@ -86,8 +92,6 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri } } else { klog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName) - // Invalidate the cache right after updating - as.cloud.vmCache.Delete(vmName) } return err } @@ -129,6 +133,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t newVM := compute.VirtualMachine{ Location: vm.Location, VirtualMachineProperties: &compute.VirtualMachineProperties{ + HardwareProfile: vm.HardwareProfile, StorageProfile: &compute.StorageProfile{ DataDisks: &disks, }, @@ -137,12 +142,15 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, vmName, diskName) ctx, cancel := getContextWithCancel() defer cancel() - if _, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM); err != nil { + + // Invalidate the cache right after updating + defer as.cloud.vmCache.Delete(vmName) + + _, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) + if err != nil { 
klog.Errorf("azureDisk - detach disk(%s) failed, err: %v", diskName, err) } else { klog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName) - // Invalidate the cache right after updating - as.cloud.vmCache.Delete(vmName) } return err } diff --git a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go index 53bbde77a99..80a27bf0385 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go @@ -67,12 +67,28 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod CreateOption: "attach", }) } - vm.StorageProfile.DataDisks = &disks + newVM := compute.VirtualMachineScaleSetVM{ + Sku: vm.Sku, + Location: vm.Location, + VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + HardwareProfile: vm.HardwareProfile, + StorageProfile: &compute.StorageProfile{ + OsDisk: vm.StorageProfile.OsDisk, + DataDisks: &disks, + }, + }, + } ctx, cancel := getContextWithCancel() defer cancel() + + // Invalidate the cache right after updating + key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) + defer ss.vmssVMCache.Delete(key) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName) - if _, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm); err != nil { + _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) + if err != nil { detail := err.Error() if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error @@ -81,9 +97,6 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod } } else { klog.V(2).Infof("azureDisk - attach disk(%s) 
succeeded", diskName) - // Invalidate the cache right after updating - key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) - ss.vmssVMCache.Delete(key) } return err } @@ -123,17 +136,31 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) } - vm.StorageProfile.DataDisks = &disks + newVM := compute.VirtualMachineScaleSetVM{ + Sku: vm.Sku, + Location: vm.Location, + VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + HardwareProfile: vm.HardwareProfile, + StorageProfile: &compute.StorageProfile{ + OsDisk: vm.StorageProfile.OsDisk, + DataDisks: &disks, + }, + }, + } + ctx, cancel := getContextWithCancel() defer cancel() + + // Invalidate the cache right after updating + key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) + defer ss.vmssVMCache.Delete(key) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName) - if _, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm); err != nil { + _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) + if err != nil { klog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err) } else { klog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName) - // Invalidate the cache right after updating - key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) - ss.vmssVMCache.Delete(key) } return err diff --git a/pkg/cloudprovider/providers/azure/azure_instances.go b/pkg/cloudprovider/providers/azure/azure_instances.go index 158ffb976ca..580983dc336 100644 --- a/pkg/cloudprovider/providers/azure/azure_instances.go +++ b/pkg/cloudprovider/providers/azure/azure_instances.go @@ -23,9 +23,8 @@ import ( "strings" 
"k8s.io/api/core/v1" - cloudprovider "k8s.io/cloud-provider" - "k8s.io/apimachinery/pkg/types" + cloudprovider "k8s.io/cloud-provider" "k8s.io/klog" ) @@ -96,7 +95,8 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N addresses := []v1.NodeAddress{ {Type: v1.NodeHostName, Address: string(name)}, } - for _, address := range ipAddress.IPV4.IPAddress { + if len(ipAddress.IPV4.IPAddress) > 0 && len(ipAddress.IPV4.IPAddress[0].PrivateIP) > 0 { + address := ipAddress.IPV4.IPAddress[0] addresses = append(addresses, v1.NodeAddress{ Type: v1.NodeInternalIP, Address: address.PrivateIP, @@ -108,7 +108,8 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N }) } } - for _, address := range ipAddress.IPV6.IPAddress { + if len(ipAddress.IPV6.IPAddress) > 0 && len(ipAddress.IPV6.IPAddress[0].PrivateIP) > 0 { + address := ipAddress.IPV6.IPAddress[0] addresses = append(addresses, v1.NodeAddress{ Type: v1.NodeInternalIP, Address: address.PrivateIP, @@ -120,6 +121,13 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N }) } } + + if len(addresses) == 1 { + // No IP addresses is got from instance metadata service, clean up cache and report errors. 
+ az.metadata.imsCache.Delete(metadataCacheKey) + return nil, fmt.Errorf("get empty IP addresses from instance metadata service") + } + return addresses, nil } @@ -155,6 +163,9 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri name, err := az.vmSet.GetNodeNameByProviderID(providerID) if err != nil { + if err == cloudprovider.InstanceNotFound { + return false, nil + } return false, err } diff --git a/pkg/cloudprovider/providers/azure/azure_instances_test.go b/pkg/cloudprovider/providers/azure/azure_instances_test.go index 3ae39917c72..40c92beb9d9 100644 --- a/pkg/cloudprovider/providers/azure/azure_instances_test.go +++ b/pkg/cloudprovider/providers/azure/azure_instances_test.go @@ -21,10 +21,12 @@ import ( "fmt" "net" "net/http" + "reflect" "testing" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/go-autorest/autorest/to" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" ) @@ -216,3 +218,122 @@ func TestInstanceShutdownByProviderID(t *testing.T) { } } } + +func TestNodeAddresses(t *testing.T) { + cloud := getTestCloud() + cloud.Config.UseInstanceMetadata = true + metadataTemplate := `{"compute":{"name":"%s"},"network":{"interface":[{"ipv4":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]},"ipv6":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]}}]}}` + + testcases := []struct { + name string + nodeName string + ipV4 string + ipV6 string + ipV4Public string + ipV6Public string + expected []v1.NodeAddress + expectError bool + }{ + { + name: "NodeAddresses should get both ipV4 and ipV6 private addresses", + nodeName: "vm1", + ipV4: "10.240.0.1", + ipV6: "1111:11111:00:00:1111:1111:000:111", + expected: []v1.NodeAddress{ + { + Type: v1.NodeHostName, + Address: "vm1", + }, + { + Type: v1.NodeInternalIP, + Address: "10.240.0.1", + }, + { + Type: v1.NodeInternalIP, + Address: "1111:11111:00:00:1111:1111:000:111", + }, + }, + }, + { + name: "NodeAddresses 
should report error when IPs are empty", + nodeName: "vm1", + expectError: true, + }, + { + name: "NodeAddresses should get ipV4 private and public addresses", + nodeName: "vm1", + ipV4: "10.240.0.1", + ipV4Public: "9.9.9.9", + expected: []v1.NodeAddress{ + { + Type: v1.NodeHostName, + Address: "vm1", + }, + { + Type: v1.NodeInternalIP, + Address: "10.240.0.1", + }, + { + Type: v1.NodeExternalIP, + Address: "9.9.9.9", + }, + }, + }, + { + name: "NodeAddresses should get ipV6 private and public addresses", + nodeName: "vm1", + ipV6: "1111:11111:00:00:1111:1111:000:111", + ipV6Public: "2222:22221:00:00:2222:2222:000:111", + expected: []v1.NodeAddress{ + { + Type: v1.NodeHostName, + Address: "vm1", + }, + { + Type: v1.NodeInternalIP, + Address: "1111:11111:00:00:1111:1111:000:111", + }, + { + Type: v1.NodeExternalIP, + Address: "2222:22221:00:00:2222:2222:000:111", + }, + }, + }, + } + + for _, test := range testcases { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Errorf("Test [%s] unexpected error: %v", test.name, err) + } + + mux := http.NewServeMux() + mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, fmt.Sprintf(metadataTemplate, test.nodeName, test.ipV4, test.ipV4Public, test.ipV6, test.ipV6Public)) + })) + go func() { + http.Serve(listener, mux) + }() + defer listener.Close() + + cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/") + if err != nil { + t.Errorf("Test [%s] unexpected error: %v", test.name, err) + } + + ipAddresses, err := cloud.NodeAddresses(context.Background(), types.NodeName(test.nodeName)) + if test.expectError { + if err == nil { + t.Errorf("Test [%s] unexpected nil err", test.name) + } + } else { + if err != nil { + t.Errorf("Test [%s] unexpected error: %v", test.name, err) + } + } + + if !reflect.DeepEqual(ipAddresses, test.expected) { + t.Errorf("Test [%s] unexpected ipAddresses: %s, expected %q", test.name, ipAddresses, 
test.expected) + } + } +} diff --git a/pkg/cloudprovider/providers/azure/azure_storageaccount.go b/pkg/cloudprovider/providers/azure/azure_storageaccount.go index 34871a11198..8dadcdc2d0f 100644 --- a/pkg/cloudprovider/providers/azure/azure_storageaccount.go +++ b/pkg/cloudprovider/providers/azure/azure_storageaccount.go @@ -64,8 +64,8 @@ func (az *Cloud) getStorageAccounts(matchingAccountType, matchingAccountKind, re return accounts, nil } -// getStorageAccesskey gets the storage account access key -func (az *Cloud) getStorageAccesskey(account, resourceGroup string) (string, error) { +// GetStorageAccesskey gets the storage account access key +func (az *Cloud) GetStorageAccesskey(account, resourceGroup string) (string, error) { ctx, cancel := getContextWithCancel() defer cancel() @@ -137,7 +137,7 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, accountKind, res } // find the access key with this account - accountKey, err := az.getStorageAccesskey(accountName, resourceGroup) + accountKey, err := az.GetStorageAccesskey(accountName, resourceGroup) if err != nil { return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err) } diff --git a/pkg/cloudprovider/providers/azure/azure_storageaccount_test.go b/pkg/cloudprovider/providers/azure/azure_storageaccount_test.go index bba66e0043f..6d6eabd2f14 100644 --- a/pkg/cloudprovider/providers/azure/azure_storageaccount_test.go +++ b/pkg/cloudprovider/providers/azure/azure_storageaccount_test.go @@ -64,7 +64,7 @@ func TestGetStorageAccessKeys(t *testing.T) { expectedKey := test.expectedKey fake.Keys = test.results fake.Err = test.err - key, err := cloud.getStorageAccesskey("acct", "rg") + key, err := cloud.GetStorageAccesskey("acct", "rg") if test.expectErr && err == nil { t.Errorf("Unexpected non-error") continue diff --git a/pkg/cloudprovider/providers/cloudstack/cloudstack.go b/pkg/cloudprovider/providers/cloudstack/cloudstack.go index 4769adb647f..222a0356293 100644 
--- a/pkg/cloudprovider/providers/cloudstack/cloudstack.go +++ b/pkg/cloudprovider/providers/cloudstack/cloudstack.go @@ -120,6 +120,11 @@ func newCSCloud(cfg *CSConfig) (*CSCloud, error) { return cs, nil } +var _ cloudprovider.Interface = (*CSCloud)(nil) +var _ cloudprovider.Instances = (*CSCloud)(nil) +var _ cloudprovider.LoadBalancer = (*CSCloud)(nil) +var _ cloudprovider.Zones = (*CSCloud)(nil) + // Initialize passes a Kubernetes clientBuilder interface to the cloud provider func (cs *CSCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { } diff --git a/pkg/cloudprovider/providers/cloudstack/metadata.go b/pkg/cloudprovider/providers/cloudstack/metadata.go index f2decccdf86..b666e076dd5 100644 --- a/pkg/cloudprovider/providers/cloudstack/metadata.go +++ b/pkg/cloudprovider/providers/cloudstack/metadata.go @@ -31,6 +31,9 @@ import ( "k8s.io/klog" ) +var _ cloudprovider.Instances = (*metadata)(nil) +var _ cloudprovider.Zones = (*metadata)(nil) + type metadata struct { dhcpServer string zone string diff --git a/pkg/cloudprovider/providers/fake/fake.go b/pkg/cloudprovider/providers/fake/fake.go index 8adf2fed214..01cec8a75a3 100644 --- a/pkg/cloudprovider/providers/fake/fake.go +++ b/pkg/cloudprovider/providers/fake/fake.go @@ -45,6 +45,14 @@ type FakeUpdateBalancerCall struct { Hosts []*v1.Node } +var _ cloudprovider.Interface = (*FakeCloud)(nil) +var _ cloudprovider.Instances = (*FakeCloud)(nil) +var _ cloudprovider.LoadBalancer = (*FakeCloud)(nil) +var _ cloudprovider.Routes = (*FakeCloud)(nil) +var _ cloudprovider.Zones = (*FakeCloud)(nil) +var _ cloudprovider.PVLabeler = (*FakeCloud)(nil) +var _ cloudprovider.Clusters = (*FakeCloud)(nil) + // FakeCloud is a test-double implementation of Interface, LoadBalancer, Instances, and Routes. It is useful for testing. 
type FakeCloud struct { Exists bool @@ -228,6 +236,8 @@ func (f *FakeCloud) SetNodeAddresses(nodeAddresses []v1.NodeAddress) { // It adds an entry "node-addresses-by-provider-id" into the internal method call record. func (f *FakeCloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { f.addCall("node-addresses-by-provider-id") + f.addressesMux.Lock() + defer f.addressesMux.Unlock() return f.Addresses, f.Err } diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 616fe5f5477..3e13930197e 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -98,6 +98,14 @@ type gceObject interface { MarshalJSON() ([]byte, error) } +var _ cloudprovider.Interface = (*Cloud)(nil) +var _ cloudprovider.Instances = (*Cloud)(nil) +var _ cloudprovider.LoadBalancer = (*Cloud)(nil) +var _ cloudprovider.Routes = (*Cloud)(nil) +var _ cloudprovider.Zones = (*Cloud)(nil) +var _ cloudprovider.PVLabeler = (*Cloud)(nil) +var _ cloudprovider.Clusters = (*Cloud)(nil) + // Cloud is an implementation of Interface, LoadBalancer and Instances for Google Compute Engine. type Cloud struct { // ClusterID contains functionality for getting (and initializing) the ingress-uid. Call Cloud.Initialize() @@ -765,9 +773,6 @@ func isProjectNumber(idOrNumber string) bool { return err == nil } -// Cloud implements cloudprovider.Interface. 
-var _ cloudprovider.Interface = (*Cloud)(nil) - func gceNetworkURL(apiEndpoint, project, network string) string { if apiEndpoint == "" { apiEndpoint = gceComputeAPIEndpoint diff --git a/pkg/cloudprovider/providers/gce/gce_disks.go b/pkg/cloudprovider/providers/gce/gce_disks.go index 24dd76fc96d..7e10e50c32b 100644 --- a/pkg/cloudprovider/providers/gce/gce_disks.go +++ b/pkg/cloudprovider/providers/gce/gce_disks.go @@ -158,7 +158,7 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider( fullyQualifiedReplicaZones := []string{} for _, replicaZone := range replicaZones.UnsortedList() { fullyQualifiedReplicaZones = append( - fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true)) + fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone)) } diskToCreate := &compute.Disk{ @@ -359,15 +359,8 @@ func (manager *gceServiceManager) getDiskTypeURI( } } -func (manager *gceServiceManager) getReplicaZoneURI(zone string, useBetaAPI bool) string { - var getProjectsAPIEndpoint string - if useBetaAPI { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpointBeta() - } else { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint() - } - - return getProjectsAPIEndpoint + fmt.Sprintf( +func (manager *gceServiceManager) getReplicaZoneURI(zone string) string { + return manager.getProjectsAPIEndpoint() + fmt.Sprintf( replicaZoneURITemplateSingleZone, manager.gce.projectID, zone) diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index 9f13db944e1..9af4e4f7bd0 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ b/pkg/cloudprovider/providers/openstack/openstack.go @@ -126,6 +126,9 @@ type MetadataOpts struct { RequestTimeout MyDuration `gcfg:"request-timeout"` } +var _ cloudprovider.Interface = (*OpenStack)(nil) +var _ cloudprovider.Zones = (*OpenStack)(nil) + // OpenStack is an implementation of cloud provider Interface for OpenStack. 
type OpenStack struct { provider *gophercloud.ProviderClient diff --git a/pkg/cloudprovider/providers/openstack/openstack_instances.go b/pkg/cloudprovider/providers/openstack/openstack_instances.go index c52ce21998e..4ca4198f419 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_instances.go +++ b/pkg/cloudprovider/providers/openstack/openstack_instances.go @@ -30,6 +30,8 @@ import ( cloudprovider "k8s.io/cloud-provider" ) +var _ cloudprovider.Instances = (*Instances)(nil) + // Instances encapsulates an implementation of Instances for OpenStack. type Instances struct { compute *gophercloud.ServiceClient diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 26e6b095a2c..5f793924a35 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -78,6 +78,8 @@ const ( ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/openstack-internal-load-balancer" ) +var _ cloudprovider.LoadBalancer = (*LbaasV2)(nil) + // LbaasV2 is a LoadBalancer implementation for Neutron LBaaS v2 API type LbaasV2 struct { LoadBalancer diff --git a/pkg/cloudprovider/providers/openstack/openstack_routes.go b/pkg/cloudprovider/providers/openstack/openstack_routes.go index 0cb31a435a7..a1b447e37cf 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_routes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_routes.go @@ -33,6 +33,8 @@ import ( var errNoRouterID = errors.New("router-id not set in cloud provider config") +var _ cloudprovider.Routes = (*Routes)(nil) + // Routes implements the cloudprovider.Routes for OpenStack clouds type Routes struct { compute *gophercloud.ServiceClient diff --git a/pkg/cloudprovider/providers/ovirt/ovirt.go b/pkg/cloudprovider/providers/ovirt/ovirt.go index 9937919ed16..c9364a2585b 100644 --- a/pkg/cloudprovider/providers/ovirt/ovirt.go +++ 
b/pkg/cloudprovider/providers/ovirt/ovirt.go @@ -36,24 +36,32 @@ import ( cloudprovider "k8s.io/cloud-provider" ) +// ProviderName is the name of this cloud provider. const ProviderName = "ovirt" -type OVirtInstance struct { +// Instance specifies UUID, name and IP address of the instance. +type Instance struct { UUID string Name string IPAddress string } -type OVirtInstanceMap map[string]OVirtInstance +// InstanceMap provides the map of Ovirt instances. +type InstanceMap map[string]Instance -type OVirtCloud struct { +var _ cloudprovider.Interface = (*Cloud)(nil) +var _ cloudprovider.Instances = (*Cloud)(nil) + +// Cloud is an implementation of the cloud provider interface for Ovirt. +type Cloud struct { VmsRequest *url.URL HostsRequest *url.URL } -type OVirtApiConfig struct { +// APIConfig wraps the api settings for the Ovirt. +type APIConfig struct { Connection struct { - ApiEntry string `gcfg:"uri"` + APIEntry string `gcfg:"uri"` Username string `gcfg:"username"` Password string `gcfg:"password"` } @@ -62,21 +70,24 @@ type OVirtApiConfig struct { } } -type XmlVmAddress struct { +// XMLVMAddress is an implementation for the Ovirt instance IP address in xml. +type XMLVMAddress struct { Address string `xml:"address,attr"` } -type XmlVmInfo struct { +// XMLVMInfo is an implementation for the Ovirt instance details in xml. +type XMLVMInfo struct { UUID string `xml:"id,attr"` Name string `xml:"name"` Hostname string `xml:"guest_info>fqdn"` - Addresses []XmlVmAddress `xml:"guest_info>ips>ip"` + Addresses []XMLVMAddress `xml:"guest_info>ips>ip"` State string `xml:"status>state"` } -type XmlVmsList struct { +// XMLVmsList is an implementation to provide the list of Ovirt instances. 
+type XMLVmsList struct { XMLName xml.Name `xml:"vms"` - Vm []XmlVmInfo `xml:"vm"` + VM []XMLVMInfo `xml:"vm"` } func init() { @@ -86,12 +97,12 @@ func init() { }) } -func newOVirtCloud(config io.Reader) (*OVirtCloud, error) { +func newOVirtCloud(config io.Reader) (*Cloud, error) { if config == nil { return nil, fmt.Errorf("missing configuration file for ovirt cloud provider") } - oVirtConfig := OVirtApiConfig{} + oVirtConfig := APIConfig{} /* defaults */ oVirtConfig.Connection.Username = "admin@internal" @@ -100,11 +111,11 @@ func newOVirtCloud(config io.Reader) (*OVirtCloud, error) { return nil, err } - if oVirtConfig.Connection.ApiEntry == "" { + if oVirtConfig.Connection.APIEntry == "" { return nil, fmt.Errorf("missing ovirt uri in cloud provider configuration") } - request, err := url.Parse(oVirtConfig.Connection.ApiEntry) + request, err := url.Parse(oVirtConfig.Connection.APIEntry) if err != nil { return nil, err } @@ -113,49 +124,50 @@ func newOVirtCloud(config io.Reader) (*OVirtCloud, error) { request.User = url.UserPassword(oVirtConfig.Connection.Username, oVirtConfig.Connection.Password) request.RawQuery = url.Values{"search": {oVirtConfig.Filters.VmsQuery}}.Encode() - return &OVirtCloud{VmsRequest: request}, nil + return &Cloud{VmsRequest: request}, nil } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (v *OVirtCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +func (v *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { } -func (v *OVirtCloud) Clusters() (cloudprovider.Clusters, bool) { +// Clusters returns the list of clusters. +func (v *Cloud) Clusters() (cloudprovider.Clusters, bool) { return nil, false } // ProviderName returns the cloud provider ID. 
-func (v *OVirtCloud) ProviderName() string { +func (v *Cloud) ProviderName() string { return ProviderName } // HasClusterID returns true if the cluster has a clusterID -func (v *OVirtCloud) HasClusterID() bool { +func (v *Cloud) HasClusterID() bool { return true } // LoadBalancer returns an implementation of LoadBalancer for oVirt cloud -func (v *OVirtCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { +func (v *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { return nil, false } // Instances returns an implementation of Instances for oVirt cloud -func (v *OVirtCloud) Instances() (cloudprovider.Instances, bool) { +func (v *Cloud) Instances() (cloudprovider.Instances, bool) { return v, true } // Zones returns an implementation of Zones for oVirt cloud -func (v *OVirtCloud) Zones() (cloudprovider.Zones, bool) { +func (v *Cloud) Zones() (cloudprovider.Zones, bool) { return nil, false } // Routes returns an implementation of Routes for oVirt cloud -func (v *OVirtCloud) Routes() (cloudprovider.Routes, bool) { +func (v *Cloud) Routes() (cloudprovider.Routes, bool) { return nil, false } // NodeAddresses returns the NodeAddresses of the instance with the specified nodeName. -func (v *OVirtCloud) NodeAddresses(ctx context.Context, nodeName types.NodeName) ([]v1.NodeAddress, error) { +func (v *Cloud) NodeAddresses(ctx context.Context, nodeName types.NodeName) ([]v1.NodeAddress, error) { name := mapNodeNameToInstanceName(nodeName) instance, err := v.fetchInstance(name) if err != nil { @@ -186,7 +198,7 @@ func (v *OVirtCloud) NodeAddresses(ctx context.Context, nodeName types.NodeName) // NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID // This method will not be called from the node that is requesting this ID. i.e. 
metadata service // and other local methods cannot be used here -func (v *OVirtCloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { +func (v *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { return []v1.NodeAddress{}, cloudprovider.NotImplemented } @@ -198,17 +210,17 @@ func mapNodeNameToInstanceName(nodeName types.NodeName) string { // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. -func (v *OVirtCloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { +func (v *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { return false, cloudprovider.NotImplemented } // InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes -func (v *OVirtCloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { +func (v *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { return false, cloudprovider.NotImplemented } // InstanceID returns the cloud provider ID of the node with the specified NodeName. -func (v *OVirtCloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) { +func (v *Cloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) { name := mapNodeNameToInstanceName(nodeName) instance, err := v.fetchInstance(name) if err != nil { @@ -222,16 +234,16 @@ func (v *OVirtCloud) InstanceID(ctx context.Context, nodeName types.NodeName) (s // InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID // This method will not be called from the node that is requesting this ID. i.e. 
metadata service // and other local methods cannot be used here -func (v *OVirtCloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { +func (v *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { return "", cloudprovider.NotImplemented } // InstanceType returns the type of the specified instance. -func (v *OVirtCloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) { +func (v *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) { return "", nil } -func getInstancesFromXml(body io.Reader) (OVirtInstanceMap, error) { +func getInstancesFromXML(body io.Reader) (InstanceMap, error) { if body == nil { return nil, fmt.Errorf("ovirt rest-api response body is missing") } @@ -241,15 +253,15 @@ func getInstancesFromXml(body io.Reader) (OVirtInstanceMap, error) { return nil, err } - vmlist := XmlVmsList{} + vmlist := XMLVmsList{} if err := xml.Unmarshal(content, &vmlist); err != nil { return nil, err } - instances := make(OVirtInstanceMap) + instances := make(InstanceMap) - for _, vm := range vmlist.Vm { + for _, vm := range vmlist.VM { // Always return only vms that are up and running if vm.Hostname != "" && strings.ToLower(vm.State) == "up" { address := "" @@ -257,7 +269,7 @@ func getInstancesFromXml(body io.Reader) (OVirtInstanceMap, error) { address = vm.Addresses[0].Address } - instances[vm.Hostname] = OVirtInstance{ + instances[vm.Hostname] = Instance{ UUID: vm.UUID, Name: vm.Name, IPAddress: address, @@ -268,7 +280,7 @@ func getInstancesFromXml(body io.Reader) (OVirtInstanceMap, error) { return instances, nil } -func (v *OVirtCloud) fetchAllInstances() (OVirtInstanceMap, error) { +func (v *Cloud) fetchAllInstances() (InstanceMap, error) { response, err := http.Get(v.VmsRequest.String()) if err != nil { return nil, err @@ -276,10 +288,10 @@ func (v *OVirtCloud) fetchAllInstances() (OVirtInstanceMap, error) { defer response.Body.Close() - return 
getInstancesFromXml(response.Body) + return getInstancesFromXML(response.Body) } -func (v *OVirtCloud) fetchInstance(name string) (*OVirtInstance, error) { +func (v *Cloud) fetchInstance(name string) (*Instance, error) { allInstances, err := v.fetchAllInstances() if err != nil { return nil, err @@ -293,7 +305,8 @@ func (v *OVirtCloud) fetchInstance(name string) (*OVirtInstance, error) { return &instance, nil } -func (m *OVirtInstanceMap) ListSortedNames() []string { +// ListSortedNames returns the sorted list of Ovirt instance names. +func (m *InstanceMap) ListSortedNames() []string { var names []string for k := range *m { @@ -305,11 +318,12 @@ func (m *OVirtInstanceMap) ListSortedNames() []string { return names } -// Implementation of Instances.CurrentNodeName -func (v *OVirtCloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) { +// CurrentNodeName is the implementation of Instances.CurrentNodeName. +func (v *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) { return types.NodeName(hostname), nil } -func (v *OVirtCloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error { +// AddSSHKeyToAllInstances is currently not implemented. 
+func (v *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error { return cloudprovider.NotImplemented } diff --git a/pkg/cloudprovider/providers/ovirt/ovirt_test.go b/pkg/cloudprovider/providers/ovirt/ovirt_test.go index fedcd36ff93..4ecbe40f6a5 100644 --- a/pkg/cloudprovider/providers/ovirt/ovirt_test.go +++ b/pkg/cloudprovider/providers/ovirt/ovirt_test.go @@ -62,14 +62,14 @@ uri = https://localhost:8443/ovirt-engine/api func TestOVirtCloudXmlParsing(t *testing.T) { body1 := (io.Reader)(nil) - _, err1 := getInstancesFromXml(body1) + _, err1 := getInstancesFromXML(body1) if err1 == nil { t.Fatalf("An error is expected when body is missing") } body2 := strings.NewReader("") - _, err2 := getInstancesFromXml(body2) + _, err2 := getInstancesFromXML(body2) if err2 == nil { t.Fatalf("An error is expected when body is empty") } @@ -80,7 +80,7 @@ func TestOVirtCloudXmlParsing(t *testing.T) { `) - instances3, err3 := getInstancesFromXml(body3) + instances3, err3 := getInstancesFromXML(body3) if err3 != nil { t.Fatalf("Unexpected error listing instances: %s", err3) } @@ -111,7 +111,7 @@ func TestOVirtCloudXmlParsing(t *testing.T) { `) - instances4, err4 := getInstancesFromXml(body4) + instances4, err4 := getInstancesFromXML(body4) if err4 != nil { t.Fatalf("Unexpected error listing instances: %s", err4) } diff --git a/pkg/cloudprovider/providers/photon/photon.go b/pkg/cloudprovider/providers/photon/photon.go index 24f8d2b8dd8..8bcca5a093d 100644 --- a/pkg/cloudprovider/providers/photon/photon.go +++ b/pkg/cloudprovider/providers/photon/photon.go @@ -55,6 +55,10 @@ const ( // overrideIP = true in cloud config file. Default value is false. var overrideIP bool = false +var _ cloudprovider.Interface = (*PCCloud)(nil) +var _ cloudprovider.Instances = (*PCCloud)(nil) +var _ cloudprovider.Zones = (*PCCloud)(nil) + // Photon is an implementation of the cloud provider interface for Photon Controller. 
type PCCloud struct { cfg *PCConfig diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 26180e6aff9..2e562b036ed 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -76,6 +76,10 @@ var ( ErrPasswordMissing = errors.New(MissingPasswordErrMsg) ) +var _ cloudprovider.Interface = (*VSphere)(nil) +var _ cloudprovider.Instances = (*VSphere)(nil) +var _ cloudprovider.Zones = (*VSphere)(nil) + // VSphere is an implementation of cloud provider Interface for VSphere. type VSphere struct { cfg *VSphereConfig diff --git a/pkg/controller/.import-restrictions b/pkg/controller/.import-restrictions index 31aa4d0b504..e7e62c875ed 100644 --- a/pkg/controller/.import-restrictions +++ b/pkg/controller/.import-restrictions @@ -278,7 +278,7 @@ "k8s.io/kubernetes/pkg/registry/core/secret", "k8s.io/kubernetes/pkg/scheduler/algorithm", "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", - "k8s.io/kubernetes/pkg/scheduler/cache", + "k8s.io/kubernetes/pkg/scheduler/nodeinfo", "k8s.io/kubernetes/pkg/securitycontext", "k8s.io/kubernetes/pkg/serviceaccount", "k8s.io/kubernetes/pkg/util/goroutinemap", diff --git a/pkg/controller/OWNERS b/pkg/controller/OWNERS index 6b09a34b804..b5bd7631ef8 100644 --- a/pkg/controller/OWNERS +++ b/pkg/controller/OWNERS @@ -5,3 +5,5 @@ approvers: - janetkuo reviewers: - deads2k +labels: +- sig/apps diff --git a/pkg/controller/cloud/BUILD b/pkg/controller/cloud/BUILD index 25e1c6fe6bc..65171eaf32e 100644 --- a/pkg/controller/cloud/BUILD +++ b/pkg/controller/cloud/BUILD @@ -50,7 +50,6 @@ go_library( go_test( name = "go_default_test", srcs = [ - "main_test.go", "node_controller_test.go", "pvlcontroller_test.go", ], diff --git a/pkg/controller/cloud/main_test.go b/pkg/controller/cloud/main_test.go deleted file mode 100644 index a2abc54d015..00000000000 --- a/pkg/controller/cloud/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 
2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cloud - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/controller/cronjob/injection.go b/pkg/controller/cronjob/injection.go index 7108879a8f9..14831fe1f6e 100644 --- a/pkg/controller/cronjob/injection.go +++ b/pkg/controller/cronjob/injection.go @@ -118,7 +118,8 @@ func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1. 
} func (r realJobControl) DeleteJob(namespace string, name string) error { - return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, nil) + background := metav1.DeletePropagationBackground + return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &background}) } type fakeJobControl struct { diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD index 34eba68811e..9270fe306f6 100644 --- a/pkg/controller/daemon/BUILD +++ b/pkg/controller/daemon/BUILD @@ -22,7 +22,7 @@ go_library( "//pkg/kubelet/types:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/util/labels:go_default_library", "//pkg/util/metrics:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", @@ -32,7 +32,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", @@ -61,7 +60,6 @@ go_test( name = "go_default_test", srcs = [ "daemon_controller_test.go", - "main_test.go", "update_test.go", ], embed = [":go_default_library"], diff --git a/pkg/controller/daemon/OWNERS b/pkg/controller/daemon/OWNERS index 4e4296cf452..fc74fc31e79 100755 --- a/pkg/controller/daemon/OWNERS +++ b/pkg/controller/daemon/OWNERS @@ -1,6 +1,7 @@ approvers: - mikedanese - janetkuo +- k82cn reviewers: - janetkuo - lukaszo diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index 
1b896118f77..9bc827b474d 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -57,7 +56,7 @@ import ( kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -1011,7 +1010,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod if err != nil { generation = nil } - template := util.CreatePodTemplate(ds.Namespace, ds.Spec.Template, generation, hash) + template := util.CreatePodTemplate(ds.Spec.Template, generation, hash) // Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize // and double with each successful iteration in a kind of "slow start". 
// This handles attempts to start large numbers of pods that would @@ -1288,23 +1287,22 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error { return dsc.updateDaemonSetStatus(ds, hash, true) } -func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) { +func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulernodeinfo.NodeInfo, error) { objects, err := dsc.podNodeIndex.ByIndex("nodeName", node.Name) if err != nil { return nil, nil, err } - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(node) for _, obj := range objects { // Ignore pods that belong to the daemonset when taking into account whether a daemonset should bind to a node. - // TODO: replace this with metav1.IsControlledBy() in 1.12 pod, ok := obj.(*v1.Pod) if !ok { continue } - if isControlledByDaemonSet(pod, ds.GetUID()) { + if metav1.IsControlledBy(pod, ds) { continue } nodeInfo.AddPod(pod) @@ -1420,7 +1418,7 @@ func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod { newPod.Spec.NodeName = nodeName // Added default tolerations for DaemonSet pods. 
- util.AddOrUpdateDaemonPodTolerations(&newPod.Spec, kubelettypes.IsCriticalPod(newPod)) + util.AddOrUpdateDaemonPodTolerations(&newPod.Spec) return newPod } @@ -1430,7 +1428,7 @@ func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod { // - PodFitsHost: checks pod's NodeName against node // - PodMatchNodeSelector: checks pod's NodeSelector and NodeAffinity against node // - PodToleratesNodeTaints: exclude tainted node unless pod has specific toleration -func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := predicates.PodFitsHost(pod, meta, nodeInfo) if err != nil { @@ -1460,7 +1458,7 @@ func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s // Predicates checks if a DaemonSet's pod can be scheduled on a node using GeneralPredicates // and PodToleratesNodeTaints predicate -func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func Predicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason // If ScheduleDaemonSetPods is enabled, only check nodeSelector, nodeAffinity and toleration/taint match. 
@@ -1523,15 +1521,6 @@ func (o podByCreationTimestampAndPhase) Less(i, j int) bool { return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) } -func isControlledByDaemonSet(p *v1.Pod, uuid types.UID) bool { - for _, ref := range p.OwnerReferences { - if ref.Controller != nil && *ref.Controller && ref.UID == uuid { - return true - } - } - return false -} - func failedPodsBackoffKey(ds *apps.DaemonSet, nodeName string) string { return fmt.Sprintf("%s/%d/%s", ds.UID, ds.Status.ObservedGeneration, nodeName) } diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 8352531fdbd..9dd5c01f500 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -1691,41 +1691,7 @@ func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) { ds.Spec.Template.Spec.Tolerations = tolerations } -// DaemonSet should launch a critical pod even when the node with OutOfDisk taints. -// TODO(#48843) OutOfDisk taints will be removed in 1.10 -func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("critical") - ds.Spec.UpdateStrategy = *strategy - setDaemonSetCritical(ds) - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("not-enough-disk", nil) - node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}} - node.Spec.Taints = []v1.Taint{{Key: schedulerapi.TaintNodeOutOfDisk, Effect: v1.TaintEffectNoSchedule}} - manager.nodeStore.Add(node) - - // NOTE: Whether or not TaintNodesByCondition is enabled, it'll add toleration to DaemonSet pods. 
- - // Without enabling critical pod annotation feature gate, we shouldn't create critical pod - defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, false)() - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) - - // With enabling critical pod annotation feature gate, we will create critical pod - defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)() - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) - } - } -} - -// DaemonSet should launch a pod even when the node with MemoryPressure/DiskPressure taints. +// DaemonSet should launch a pod even when the node with MemoryPressure/DiskPressure/PIDPressure taints. func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) { for _, f := range []bool{true, false} { defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() @@ -1742,10 +1708,12 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) { node.Status.Conditions = []v1.NodeCondition{ {Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}, {Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}, + {Type: v1.NodePIDPressure, Status: v1.ConditionTrue}, } node.Spec.Taints = []v1.Taint{ {Key: schedulerapi.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule}, {Key: schedulerapi.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule}, + {Key: schedulerapi.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule}, } manager.nodeStore.Add(node) diff --git a/pkg/controller/daemon/main_test.go b/pkg/controller/daemon/main_test.go deleted file mode 100644 index 4fe1e9885c2..00000000000 --- a/pkg/controller/daemon/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package daemon - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/controller/daemon/util/BUILD b/pkg/controller/daemon/util/BUILD index 0292ffce296..650f26f94b7 100644 --- a/pkg/controller/daemon/util/BUILD +++ b/pkg/controller/daemon/util/BUILD @@ -13,14 +13,11 @@ go_library( deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/features:go_default_library", - "//pkg/kubelet/types:go_default_library", "//pkg/scheduler/api:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) @@ -39,10 +36,7 @@ filegroup( go_test( name = "go_default_test", - srcs = [ - "daemonset_util_test.go", - "main_test.go", - ], + srcs = ["daemonset_util_test.go"], embed = [":go_default_library"], deps = [ "//pkg/api/testapi:go_default_library", diff --git a/pkg/controller/daemon/util/daemonset_util.go b/pkg/controller/daemon/util/daemonset_util.go index 
c75334333e5..bde65a031d2 100644 --- a/pkg/controller/daemon/util/daemonset_util.go +++ b/pkg/controller/daemon/util/daemonset_util.go @@ -24,11 +24,8 @@ import ( "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" podutil "k8s.io/kubernetes/pkg/api/v1/pod" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/features" - kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" ) @@ -49,7 +46,7 @@ func GetTemplateGeneration(ds *apps.DaemonSet) (*int64, error) { } // AddOrUpdateDaemonPodTolerations apply necessary tolerations to DeamonSet Pods, e.g. node.kubernetes.io/not-ready:NoExecute. -func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) { +func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec) { // DaemonSet pods shouldn't be deleted by NodeController in case of node problems. // Add infinite toleration for taint notReady:NoExecute here // to survive taint-based eviction enforced by NodeController @@ -71,8 +68,7 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) { }) // According to TaintNodesByCondition feature, all DaemonSet pods should tolerate - // MemoryPressure, DisPressure, Unschedulable and NetworkUnavailable taints, - // and the critical pods should tolerate OutOfDisk taint. + // MemoryPressure, DiskPressure, PIDPressure, Unschedulable and NetworkUnavailable taints. 
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{ Key: schedulerapi.TaintNodeDiskPressure, Operator: v1.TolerationOpExists, @@ -85,6 +81,12 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) { Effect: v1.TaintEffectNoSchedule, }) + v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{ + Key: schedulerapi.TaintNodePIDPressure, + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }) + v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{ Key: schedulerapi.TaintNodeUnschedulable, Operator: v1.TolerationOpExists, @@ -98,33 +100,15 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) { Effect: v1.TaintEffectNoSchedule, }) } - - // TODO(#48843) OutOfDisk taints will be removed in 1.10 - if isCritical { - v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{ - Key: schedulerapi.TaintNodeOutOfDisk, - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoExecute, - }) - v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{ - Key: schedulerapi.TaintNodeOutOfDisk, - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoSchedule, - }) - } } // CreatePodTemplate returns copy of provided template with additional // label which contains templateGeneration (for backward compatibility), // hash of provided template and sets default daemon tolerations. -func CreatePodTemplate(ns string, template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec { +func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec { newTemplate := *template.DeepCopy() - // TODO(k82cn): when removing CritialPod feature, also remove 'ns' parameter. 
- isCritical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) && - kubelettypes.IsCritical(ns, newTemplate.Annotations) - - AddOrUpdateDaemonPodTolerations(&newTemplate.Spec, isCritical) + AddOrUpdateDaemonPodTolerations(&newTemplate.Spec) if newTemplate.ObjectMeta.Labels == nil { newTemplate.ObjectMeta.Labels = make(map[string]string) diff --git a/pkg/controller/daemon/util/daemonset_util_test.go b/pkg/controller/daemon/util/daemonset_util_test.go index 24165a8ce04..be0e45f04a8 100644 --- a/pkg/controller/daemon/util/daemonset_util_test.go +++ b/pkg/controller/daemon/util/daemonset_util_test.go @@ -25,6 +25,7 @@ import ( extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -154,7 +155,7 @@ func TestCreatePodTemplate(t *testing.T) { } for _, test := range tests { podTemplateSpec := v1.PodTemplateSpec{} - newPodTemplate := CreatePodTemplate("", podTemplateSpec, test.templateGeneration, test.hash) + newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash) val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey] if !exists || val != fmt.Sprint(*test.templateGeneration) { t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", *test.templateGeneration, val) @@ -482,17 +483,12 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) { func forEachFeatureGate(t *testing.T, tf func(t *testing.T), gates ...utilfeature.Feature) { for _, fg := range gates { - func() { - enabled := utilfeature.DefaultFeatureGate.Enabled(fg) - defer func() { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)) - }() - - for _, f := range []bool{true, false} { - 
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)) + for _, f := range []bool{true, false} { + func() { + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)() t.Run(fmt.Sprintf("%v (%t)", fg, f), tf) - } - }() + }() + } } } diff --git a/pkg/controller/podautoscaler/BUILD b/pkg/controller/podautoscaler/BUILD index 7ff398dea32..025f5921018 100644 --- a/pkg/controller/podautoscaler/BUILD +++ b/pkg/controller/podautoscaler/BUILD @@ -5,7 +5,7 @@ go_library( srcs = [ "doc.go", "horizontal.go", - "rate_limitters.go", + "rate_limiters.go", "replica_calculator.go", ], importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler", diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index b043564abe8..fe0feb34497 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -179,7 +179,7 @@ func (a *HorizontalController) enqueueHPA(obj interface{}) { return } - // always add rate-limitted so we don't fetch metrics more that once per resync interval + // always add rate-limited so we don't fetch metrics more that once per resync interval a.queue.AddRateLimited(key) } diff --git a/pkg/controller/podautoscaler/rate_limitters.go b/pkg/controller/podautoscaler/rate_limiters.go similarity index 94% rename from pkg/controller/podautoscaler/rate_limitters.go rename to pkg/controller/podautoscaler/rate_limiters.go index 06e36ec40a8..915cd5c151c 100644 --- a/pkg/controller/podautoscaler/rate_limitters.go +++ b/pkg/controller/podautoscaler/rate_limiters.go @@ -46,7 +46,7 @@ func (r *FixedItemIntervalRateLimiter) NumRequeues(item interface{}) int { func (r *FixedItemIntervalRateLimiter) Forget(item interface{}) { } -// NewDefaultHPARateLimiter creates a rate limitter which limits overall (as per the +// NewDefaultHPARateLimiter creates a rate limiter which limits overall (as per the // default controller rate limiter), as well as per the 
resync interval func NewDefaultHPARateLimiter(interval time.Duration) workqueue.RateLimiter { return NewFixedItemIntervalRateLimiter(interval) diff --git a/pkg/controller/service/service_controller_test.go b/pkg/controller/service/service_controller_test.go index bbc4d24aae8..c1563b8f921 100644 --- a/pkg/controller/service/service_controller_test.go +++ b/pkg/controller/service/service_controller_test.go @@ -43,7 +43,7 @@ func newService(name string, uid types.UID, serviceType v1.ServiceType) *v1.Serv return &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default", UID: uid, SelfLink: testapi.Default.SelfLink("services", name)}, Spec: v1.ServiceSpec{Type: serviceType}} } -//Wrap newService so that you dont have to call default argumetns again and again. +//Wrap newService so that you don't have to call default arguments again and again. func defaultExternalService() *v1.Service { return newService("external-balancer", types.UID("123"), v1.ServiceTypeLoadBalancer) @@ -325,8 +325,6 @@ func TestGetNodeConditionPredicate(t *testing.T) { } } -// TODO(a-robinson): Add tests for update/sync/delete. - func TestProcessServiceUpdate(t *testing.T) { var controller *ServiceController @@ -416,7 +414,7 @@ func TestProcessServiceUpdate(t *testing.T) { } // TestConflictWhenProcessServiceUpdate tests if processServiceUpdate will -// retry creating the load balancer if the update operation returns a conflict +// retry creating the load balancer when the update operation returns a conflict // error. 
func TestConflictWhenProcessServiceUpdate(t *testing.T) { svcName := "conflict-lb" @@ -462,15 +460,14 @@ func TestSyncService(t *testing.T) { key: "invalid/key/string", updateFn: func() { controller, _, _ = newController() - }, expectedFn: func(e error) error { - //TODO: Expected error is of the format fmt.Errorf("unexpected key format: %q", "invalid/key/string"), - //TODO: should find a way to test for dependent package errors in such a way that it wont break + //TODO: should find a way to test for dependent package errors in such a way that it won't break //TODO: our tests, currently we only test if there is an error. - //Error should be non-nil - if e == nil { - return fmt.Errorf("Expected=unexpected key format: %q, Obtained=nil", "invalid/key/string") + //Error should be unexpected key format: "invalid/key/string" + expectedError := fmt.Sprintf("unexpected key format: %q", "invalid/key/string") + if e == nil || e.Error() != expectedError { + return fmt.Errorf("Expected=unexpected key format: %q, Obtained=%v", "invalid/key/string", e) } return nil }, @@ -536,11 +533,11 @@ func TestProcessServiceDeletion(t *testing.T) { testCases := []struct { testName string - updateFn func(*ServiceController) // Update function used to manupulate srv and controller values + updateFn func(*ServiceController) // Update function used to manipulate srv and controller values expectedFn func(svcErr error) error // Function to check if the returned value is expected }{ { - testName: "If an non-existent service is deleted", + testName: "If a non-existent service is deleted", updateFn: func(controller *ServiceController) { // Does not do anything }, @@ -717,7 +714,7 @@ func TestDoesExternalLoadBalancerNeedsUpdate(t *testing.T) { } } -//All the testcases for ServiceCache uses a single cache, these below test cases should be run in order, +//All the test cases for ServiceCache uses a single cache, these below test cases should be run in order, //as tc1 (addCache would add elements to the 
cache) //and tc2 (delCache would remove element from the cache without it adding automatically) //Please keep this in mind while adding new test cases. @@ -827,7 +824,7 @@ func TestServiceCache(t *testing.T) { } } -//Test a utility functions as its not easy to unit test nodeSyncLoop directly +//Test a utility functions as it's not easy to unit test nodeSyncLoop directly func TestNodeSlicesEqualForLB(t *testing.T) { numNodes := 10 nArray := make([]*v1.Node, numNodes) diff --git a/pkg/controller/volume/persistentvolume/BUILD b/pkg/controller/volume/persistentvolume/BUILD index ae16c23f113..aac87f389d9 100644 --- a/pkg/controller/volume/persistentvolume/BUILD +++ b/pkg/controller/volume/persistentvolume/BUILD @@ -70,7 +70,6 @@ go_test( "delete_test.go", "framework_test.go", "index_test.go", - "main_test.go", "provision_test.go", "pv_controller_test.go", "recycle_test.go", diff --git a/pkg/controller/volume/persistentvolume/index_test.go b/pkg/controller/volume/persistentvolume/index_test.go index 8729891c2c7..8955e850768 100644 --- a/pkg/controller/volume/persistentvolume/index_test.go +++ b/pkg/controller/volume/persistentvolume/index_test.go @@ -1161,19 +1161,20 @@ func TestVolumeModeCheck(t *testing.T) { } for name, scenario := range scenarios { - recover := utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, scenario.enableBlock) - expectedMismatch, err := checkVolumeModeMismatches(&scenario.pvc.Spec, &scenario.vol.Spec) - if err != nil { - t.Errorf("Unexpected failure for checkVolumeModeMismatches: %v", err) - } - // expected to match but either got an error or no returned pvmatch - if expectedMismatch && !scenario.isExpectedMismatch { - t.Errorf("Unexpected failure for scenario, expected not to mismatch on modes but did: %s", name) - } - if !expectedMismatch && scenario.isExpectedMismatch { - t.Errorf("Unexpected failure for scenario, did not mismatch on mode when expected to mismatch: %s", name) - } - recover() + 
t.Run(name, func(t *testing.T) { + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, scenario.enableBlock)() + expectedMismatch, err := checkVolumeModeMismatches(&scenario.pvc.Spec, &scenario.vol.Spec) + if err != nil { + t.Errorf("Unexpected failure for checkVolumeModeMismatches: %v", err) + } + // expected to match but either got an error or no returned pvmatch + if expectedMismatch && !scenario.isExpectedMismatch { + t.Errorf("Unexpected failure for scenario, expected not to mismatch on modes but did: %s", name) + } + if !expectedMismatch && scenario.isExpectedMismatch { + t.Errorf("Unexpected failure for scenario, did not mismatch on mode when expected to mismatch: %s", name) + } + }) } } @@ -1252,23 +1253,24 @@ func TestFilteringVolumeModes(t *testing.T) { } for name, scenario := range scenarios { - recover := utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, scenario.enableBlock) - pvmatch, err := scenario.vol.findBestMatchForClaim(scenario.pvc, false) - // expected to match but either got an error or no returned pvmatch - if pvmatch == nil && scenario.isExpectedMatch { - t.Errorf("Unexpected failure for scenario, no matching volume: %s", name) - } - if err != nil && scenario.isExpectedMatch { - t.Errorf("Unexpected failure for scenario: %s - %+v", name, err) - } - // expected to not match but either got an error or a returned pvmatch - if pvmatch != nil && !scenario.isExpectedMatch { - t.Errorf("Unexpected failure for scenario, expected no matching volume: %s", name) - } - if err != nil && !scenario.isExpectedMatch { - t.Errorf("Unexpected failure for scenario: %s - %+v", name, err) - } - recover() + t.Run(name, func(t *testing.T) { + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, scenario.enableBlock)() + pvmatch, err := scenario.vol.findBestMatchForClaim(scenario.pvc, false) + // expected to 
match but either got an error or no returned pvmatch + if pvmatch == nil && scenario.isExpectedMatch { + t.Errorf("Unexpected failure for scenario, no matching volume: %s", name) + } + if err != nil && scenario.isExpectedMatch { + t.Errorf("Unexpected failure for scenario: %s - %+v", name, err) + } + // expected to not match but either got an error or a returned pvmatch + if pvmatch != nil && !scenario.isExpectedMatch { + t.Errorf("Unexpected failure for scenario, expected no matching volume: %s", name) + } + if err != nil && !scenario.isExpectedMatch { + t.Errorf("Unexpected failure for scenario: %s - %+v", name, err) + } + }) } } diff --git a/pkg/controller/volume/persistentvolume/main_test.go b/pkg/controller/volume/persistentvolume/main_test.go deleted file mode 100644 index 490540aa60b..00000000000 --- a/pkg/controller/volume/persistentvolume/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package persistentvolume - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go b/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go index 292ee8f6ff2..2cc50a91a5f 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go +++ b/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go @@ -377,8 +377,7 @@ func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume type PVCAssumeCache interface { AssumeCache - // GetPVC returns the PVC from the cache with the same - // namespace and the same name of the specified pod. + // GetPVC returns the PVC from the cache with given pvcKey. // pvcKey is the result of MetaNamespaceKeyFunc on PVC obj GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) } diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index cd35ecb7044..34d9e6713cd 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -396,7 +396,7 @@ const ( ) func init() { - utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates) + utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates) } // defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. 
@@ -455,7 +455,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS ResourceQuotaScopeSelectors: {Default: true, PreRelease: utilfeature.Beta}, CSIBlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, RuntimeClass: {Default: false, PreRelease: utilfeature.Alpha}, - NodeLease: {Default: false, PreRelease: utilfeature.Alpha}, + NodeLease: {Default: true, PreRelease: utilfeature.Beta}, SCTPSupport: {Default: false, PreRelease: utilfeature.Alpha}, VolumeSnapshotDataSource: {Default: false, PreRelease: utilfeature.Alpha}, ProcMountType: {Default: false, PreRelease: utilfeature.Alpha}, diff --git a/pkg/kubeapiserver/doc.go b/pkg/kubeapiserver/doc.go index aa18901607b..fefb1cc6ebe 100644 --- a/pkg/kubeapiserver/doc.go +++ b/pkg/kubeapiserver/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// The kubapiserver package holds code that is common to both the kube-apiserver +// The kubeapiserver package holds code that is common to both the kube-apiserver // and the federation-apiserver, but isn't part of a generic API server. // For instance, the non-delegated authorization options are used by those two // servers, but no generic API server is likely to use them. 
diff --git a/pkg/kubectl/.import-restrictions b/pkg/kubectl/.import-restrictions index 6cc204640d3..72be55c1893 100644 --- a/pkg/kubectl/.import-restrictions +++ b/pkg/kubectl/.import-restrictions @@ -121,7 +121,7 @@ "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", "k8s.io/kubernetes/pkg/scheduler/api", - "k8s.io/kubernetes/pkg/scheduler/cache", + "k8s.io/kubernetes/pkg/scheduler/nodeinfo", "k8s.io/kubernetes/pkg/scheduler/internal/cache", "k8s.io/kubernetes/pkg/scheduler/util", "k8s.io/kubernetes/pkg/scheduler/volumebinder", diff --git a/pkg/kubectl/cmd/annotate/annotate.go b/pkg/kubectl/cmd/annotate/annotate.go index 56fe5603fb4..dedc9e77ec9 100644 --- a/pkg/kubectl/cmd/annotate/annotate.go +++ b/pkg/kubectl/cmd/annotate/annotate.go @@ -109,6 +109,7 @@ var ( kubectl annotate pods foo description-`)) ) +// NewAnnotateOptions creates the options for annotate func NewAnnotateOptions(ioStreams genericclioptions.IOStreams) *AnnotateOptions { return &AnnotateOptions{ PrintFlags: genericclioptions.NewPrintFlags("annotated").WithTypeSetter(scheme.Scheme), @@ -119,6 +120,7 @@ func NewAnnotateOptions(ioStreams genericclioptions.IOStreams) *AnnotateOptions } } +// NewCmdAnnotate creates the `annotate` command func NewCmdAnnotate(parent string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { o := NewAnnotateOptions(ioStreams) @@ -126,7 +128,7 @@ func NewCmdAnnotate(parent string, f cmdutil.Factory, ioStreams genericclioption Use: "annotate [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... 
KEY_N=VAL_N [--resource-version=version]", DisableFlagsInUseLine: true, Short: i18n.T("Update the annotations on a resource"), - Long: annotateLong + "\n\n" + cmdutil.SuggestApiResources(parent), + Long: annotateLong + "\n\n" + cmdutil.SuggestAPIResources(parent), Example: annotateExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) diff --git a/pkg/kubectl/cmd/apiresources/apiresources.go b/pkg/kubectl/cmd/apiresources/apiresources.go index 2e3d90bf239..54da3c981b5 100644 --- a/pkg/kubectl/cmd/apiresources/apiresources.go +++ b/pkg/kubectl/cmd/apiresources/apiresources.go @@ -51,9 +51,9 @@ var ( kubectl api-resources --api-group=extensions`) ) -// ApiResourcesOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of -// referencing the cmd.Flags() -type ApiResourcesOptions struct { +// APIResourceOptions is the start of the data required to perform the operation. +// As new fields are added, add them here instead of referencing the cmd.Flags() +type APIResourceOptions struct { Output string APIGroup string Namespaced bool @@ -70,8 +70,9 @@ type groupResource struct { APIResource metav1.APIResource } -func NewAPIResourceOptions(ioStreams genericclioptions.IOStreams) *ApiResourcesOptions { - return &ApiResourcesOptions{ +// NewAPIResourceOptions creates the options for APIResource +func NewAPIResourceOptions(ioStreams genericclioptions.IOStreams) *APIResourceOptions { + return &APIResourceOptions{ IOStreams: ioStreams, Namespaced: true, } @@ -89,7 +90,7 @@ func NewCmdAPIResources(f cmdutil.Factory, ioStreams genericclioptions.IOStreams Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(cmd, args)) cmdutil.CheckErr(o.Validate()) - cmdutil.CheckErr(o.RunApiResources(cmd, f)) + cmdutil.CheckErr(o.RunAPIResources(cmd, f)) }, } @@ -103,7 +104,8 @@ func NewCmdAPIResources(f cmdutil.Factory, ioStreams genericclioptions.IOStreams return cmd } 
-func (o *ApiResourcesOptions) Validate() error { +// Validate checks to the APIResourceOptions to see if there is sufficient information run the command +func (o *APIResourceOptions) Validate() error { supportedOutputTypes := sets.NewString("", "wide", "name") if !supportedOutputTypes.Has(o.Output) { return fmt.Errorf("--output %v is not available", o.Output) @@ -111,14 +113,16 @@ func (o *ApiResourcesOptions) Validate() error { return nil } -func (o *ApiResourcesOptions) Complete(cmd *cobra.Command, args []string) error { +// Complete adapts from the command line args and validates them +func (o *APIResourceOptions) Complete(cmd *cobra.Command, args []string) error { if len(args) != 0 { return cmdutil.UsageErrorf(cmd, "unexpected arguments: %v", args) } return nil } -func (o *ApiResourcesOptions) RunApiResources(cmd *cobra.Command, f cmdutil.Factory) error { +// RunAPIResources does the work +func (o *APIResourceOptions) RunAPIResources(cmd *cobra.Command, f cmdutil.Factory) error { w := printers.GetNewTabWriter(o.Out) defer w.Flush() diff --git a/pkg/kubectl/cmd/apiresources/apiversions.go b/pkg/kubectl/cmd/apiresources/apiversions.go index 738aa51b6b4..e12897c444c 100644 --- a/pkg/kubectl/cmd/apiresources/apiversions.go +++ b/pkg/kubectl/cmd/apiresources/apiversions.go @@ -36,21 +36,23 @@ var ( kubectl api-versions`)) ) -type ApiVersionsOptions struct { +// APIVersionsOptions have the data required for API versions +type APIVersionsOptions struct { discoveryClient discovery.CachedDiscoveryInterface genericclioptions.IOStreams } -func NewApiVersionsOptions(ioStreams genericclioptions.IOStreams) *ApiVersionsOptions { - return &ApiVersionsOptions{ +// NewAPIVersionsOptions creates the options for APIVersions +func NewAPIVersionsOptions(ioStreams genericclioptions.IOStreams) *APIVersionsOptions { + return &APIVersionsOptions{ IOStreams: ioStreams, } } // NewCmdAPIVersions creates the `api-versions` command func NewCmdAPIVersions(f cmdutil.Factory, ioStreams 
genericclioptions.IOStreams) *cobra.Command { - o := NewApiVersionsOptions(ioStreams) + o := NewAPIVersionsOptions(ioStreams) cmd := &cobra.Command{ Use: "api-versions", Short: "Print the supported API versions on the server, in the form of \"group/version\"", @@ -58,13 +60,14 @@ func NewCmdAPIVersions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) Example: apiversionsExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) - cmdutil.CheckErr(o.RunApiVersions()) + cmdutil.CheckErr(o.RunAPIVersions()) }, } return cmd } -func (o *ApiVersionsOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { +// Complete adapts from the command line args and factory to the data required +func (o *APIVersionsOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { if len(args) != 0 { return cmdutil.UsageErrorf(cmd, "unexpected arguments: %v", args) } @@ -76,13 +79,14 @@ func (o *ApiVersionsOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg return nil } -func (o *ApiVersionsOptions) RunApiVersions() error { +// RunAPIVersions does the work +func (o *APIVersionsOptions) RunAPIVersions() error { // Always request fresh data from the server o.discoveryClient.Invalidate() groupList, err := o.discoveryClient.ServerGroups() if err != nil { - return fmt.Errorf("Couldn't get available api versions from server: %v\n", err) + return fmt.Errorf("couldn't get available api versions from server: %v", err) } apiVersions := metav1.ExtractGroupVersions(groupList) sort.Strings(apiVersions) diff --git a/pkg/kubectl/cmd/apply/apply.go b/pkg/kubectl/cmd/apply/apply.go index 08919f23b2b..acf97e30345 100644 --- a/pkg/kubectl/cmd/apply/apply.go +++ b/pkg/kubectl/cmd/apply/apply.go @@ -17,6 +17,7 @@ limitations under the License. 
package apply import ( + "encoding/json" "fmt" "io" "strings" @@ -72,7 +73,7 @@ type ApplyOptions struct { cmdBaseName string All bool Overwrite bool - OpenApiPatch bool + OpenAPIPatch bool PruneWhitelist []string ShouldIncludeUninitialized bool @@ -92,7 +93,7 @@ type ApplyOptions struct { const ( // maxPatchRetry is the maximum number of conflicts retry for during a patch operation before returning failure maxPatchRetry = 5 - // backOffPeriod is the period to back off when apply patch resutls in error. + // backOffPeriod is the period to back off when apply patch results in error. backOffPeriod = 1 * time.Second // how many times we can retry before back off triesBeforeBackOff = 1 @@ -132,7 +133,7 @@ func NewApplyOptions(ioStreams genericclioptions.IOStreams) *ApplyOptions { PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), Overwrite: true, - OpenApiPatch: true, + OpenAPIPatch: true, Recorder: genericclioptions.NoopRecorder{}, @@ -140,6 +141,7 @@ func NewApplyOptions(ioStreams genericclioptions.IOStreams) *ApplyOptions { } } +// NewCmdApply creates the `apply` command func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { o := NewApplyOptions(ioStreams) @@ -173,7 +175,7 @@ func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericclioptions cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") cmd.Flags().BoolVar(&o.All, "all", o.All, "Select all resources in the namespace of the specified resource types.") cmd.Flags().StringArrayVar(&o.PruneWhitelist, "prune-whitelist", o.PruneWhitelist, "Overwrite the default whitelist with for --prune") - cmd.Flags().BoolVar(&o.OpenApiPatch, "openapi-patch", o.OpenApiPatch, "If true, use openapi to calculate diff when the openapi presents and the resource can be found in the openapi spec. 
Otherwise, fall back to use baked-in types.") + cmd.Flags().BoolVar(&o.OpenAPIPatch, "openapi-patch", o.OpenAPIPatch, "If true, use openapi to calculate diff when the openapi presents and the resource can be found in the openapi spec. Otherwise, fall back to use baked-in types.") cmd.Flags().BoolVar(&o.ServerDryRun, "server-dry-run", o.ServerDryRun, "If true, request will be sent to server with dry-run flag, which means the modifications won't be persisted. This is an alpha feature and flag.") cmdutil.AddDryRunFlag(cmd) cmdutil.AddIncludeUninitializedFlag(cmd) @@ -257,7 +259,7 @@ func validatePruneAll(prune, all bool, selector string) error { return fmt.Errorf("cannot set --all and --selector at the same time") } if prune && !all && selector == "" { - return fmt.Errorf("all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector.") + return fmt.Errorf("all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector") } return nil } @@ -295,7 +297,7 @@ func parsePruneResources(mapper meta.RESTMapper, gvks []string) ([]pruneResource func (o *ApplyOptions) Run() error { var openapiSchema openapi.Resources - if o.OpenApiPatch { + if o.OpenAPIPatch { openapiSchema = o.OpenAPISchema } @@ -342,6 +344,13 @@ func (o *ApplyOptions) Run() error { return err } + // If server-dry-run is requested but the type doesn't support it, fail right away. 
+ if o.ServerDryRun { + if err := dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + return err + } + } + if info.Namespaced() { visitedNamespaces.Insert(info.Namespace) } @@ -365,12 +374,6 @@ func (o *ApplyOptions) Run() error { if !errors.IsNotFound(err) { return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) } - // If server-dry-run is requested but the type doesn't support it, fail right away. - if o.ServerDryRun { - if err := dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { - return err - } - } // Create the resource if it doesn't exist // First, update the annotation used by kubectl apply @@ -436,6 +439,7 @@ func (o *ApplyOptions) Run() error { GracePeriod: o.DeleteOptions.GracePeriod, ServerDryRun: o.ServerDryRun, OpenapiSchema: openapiSchema, + Retries: maxPatchRetry, } patchBytes, patchedObject, err := patcher.Patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut) @@ -699,6 +703,12 @@ type Patcher struct { GracePeriod int ServerDryRun bool + // If set, forces the patch against a specific resourceVersion + ResourceVersion *string + + // Number of retries to make if the patch fails with conflict + Retries int + OpenapiSchema openapi.Resources } @@ -741,6 +751,22 @@ func (v *DryRunVerifier) HasSupport(gvk schema.GroupVersionKind) error { return nil } +func addResourceVersion(patch []byte, rv string) ([]byte, error) { + var patchMap map[string]interface{} + err := json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, err + } + u := unstructured.Unstructured{Object: patchMap} + a, err := meta.Accessor(&u) + if err != nil { + return nil, err + } + a.SetResourceVersion(rv) + + return json.Marshal(patchMap) +} + func (p *Patcher) patchSimple(obj runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { // Serialize the current configuration of 
the object from the server. current, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) @@ -812,6 +838,13 @@ func (p *Patcher) patchSimple(obj runtime.Object, modified []byte, source, names return patch, obj, nil } + if p.ResourceVersion != nil { + patch, err = addResourceVersion(patch, *p.ResourceVersion) + if err != nil { + return nil, nil, cmdutil.AddSourceToErr("Failed to insert resourceVersion in patch", source, err) + } + } + options := metav1.UpdateOptions{} if p.ServerDryRun { options.DryRun = []string{metav1.DryRunAll} @@ -824,7 +857,10 @@ func (p *Patcher) patchSimple(obj runtime.Object, modified []byte, source, names func (p *Patcher) Patch(current runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { var getErr error patchBytes, patchObject, err := p.patchSimple(current, modified, source, namespace, name, errOut) - for i := 1; i <= maxPatchRetry && errors.IsConflict(err); i++ { + if p.Retries == 0 { + p.Retries = maxPatchRetry + } + for i := 1; i <= p.Retries && errors.IsConflict(err); i++ { if i > triesBeforeBackOff { p.BackOff.Sleep(backOffPeriod) } @@ -867,7 +903,7 @@ func (p *Patcher) deleteAndCreate(original runtime.Object, modified []byte, name // but still propagate and advertise error to user recreated, recreateErr := p.Helper.Create(namespace, true, original, &options) if recreateErr != nil { - err = fmt.Errorf("An error occurred force-replacing the existing object with the newly provided one:\n\n%v.\n\nAdditionally, an error occurred attempting to restore the original object:\n\n%v\n", err, recreateErr) + err = fmt.Errorf("An error occurred force-replacing the existing object with the newly provided one:\n\n%v.\n\nAdditionally, an error occurred attempting to restore the original object:\n\n%v", err, recreateErr) } else { createdObject = recreated } diff --git a/pkg/kubectl/cmd/apply/apply_set_last_applied.go b/pkg/kubectl/cmd/apply/apply_set_last_applied.go index 
23699f407fc..722f9efedfe 100644 --- a/pkg/kubectl/cmd/apply/apply_set_last_applied.go +++ b/pkg/kubectl/cmd/apply/apply_set_last_applied.go @@ -162,9 +162,8 @@ func (o *SetLastAppliedOptions) Validate() error { if err := info.Get(); err != nil { if errors.IsNotFound(err) { return err - } else { - return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) } + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) } originalBuf, err := kubectl.GetOriginalConfiguration(info.Object) if err != nil { diff --git a/pkg/kubectl/cmd/apply/apply_test.go b/pkg/kubectl/cmd/apply/apply_test.go index 3bd088fae96..6c21e92d65c 100644 --- a/pkg/kubectl/cmd/apply/apply_test.go +++ b/pkg/kubectl/cmd/apply/apply_test.go @@ -377,7 +377,7 @@ func TestRunApplyViewLastApplied(t *testing.T) { name: "view resource/name invalid format", filePath: "", outputFormat: "wide", - expectedErr: "error: Unexpected -o output mode: wide, the flag 'output' must be one of yaml|json\nSee 'view-last-applied -h' for help and examples.", + expectedErr: "error: Unexpected -o output mode: wide, the flag 'output' must be one of yaml|json\nSee 'view-last-applied -h' for help and examples", expectedOut: "", selector: "", args: []string{"replicationcontroller", "test-rc"}, @@ -648,7 +648,7 @@ func TestApplyRetry(t *testing.T) { case p == pathRC && m == "PATCH": if firstPatch { firstPatch = false - statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first.")) + statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. 
Please apply at first")) bodyBytes, _ := json.Marshal(statusErr) bodyErr := ioutil.NopCloser(bytes.NewReader(bodyBytes)) return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader(), Body: bodyErr}, nil @@ -1280,7 +1280,7 @@ func TestForceApply(t *testing.T) { case strings.HasSuffix(p, pathRC) && m == "PATCH": counts["patch"]++ if counts["patch"] <= 6 { - statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first.")) + statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first")) bodyBytes, _ := json.Marshal(statusErr) bodyErr := ioutil.NopCloser(bytes.NewReader(bodyBytes)) return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader(), Body: bodyErr}, nil diff --git a/pkg/kubectl/cmd/auth/auth.go b/pkg/kubectl/cmd/auth/auth.go index 3f1e5943d73..d53b30af6ff 100644 --- a/pkg/kubectl/cmd/auth/auth.go +++ b/pkg/kubectl/cmd/auth/auth.go @@ -23,6 +23,7 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" ) +// NewCmdAuth returns an initialized Command instance for 'auth' sub command func NewCmdAuth(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { // Parent command to which all subcommands are added. 
cmds := &cobra.Command{ diff --git a/pkg/kubectl/cmd/auth/cani.go b/pkg/kubectl/cmd/auth/cani.go index 31ce080dd1a..e83e0da1636 100644 --- a/pkg/kubectl/cmd/auth/cani.go +++ b/pkg/kubectl/cmd/auth/cani.go @@ -80,6 +80,7 @@ var ( kubectl auth can-i get /logs/`) ) +// NewCmdCanI returns an initialized Command for 'auth can-i' sub command func NewCmdCanI(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := &CanIOptions{ IOStreams: streams, @@ -112,6 +113,7 @@ func NewCmdCanI(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C return cmd } +// Complete completes all the required options func (o *CanIOptions) Complete(f cmdutil.Factory, args []string) error { if o.Quiet { o.Out = ioutil.Discard @@ -155,6 +157,7 @@ func (o *CanIOptions) Complete(f cmdutil.Factory, args []string) error { return nil } +// Validate makes sure provided values for CanIOptions are valid func (o *CanIOptions) Validate() error { if o.NonResourceURL != "" { if o.Subresource != "" { @@ -167,6 +170,7 @@ func (o *CanIOptions) Validate() error { return nil } +// RunAccessCheck checks if user has access to a certain resource or non resource URL func (o *CanIOptions) RunAccessCheck() (bool, error) { var sar *authorizationv1.SelfSubjectAccessReview if o.NonResourceURL == "" { diff --git a/pkg/kubectl/cmd/auth/reconcile.go b/pkg/kubectl/cmd/auth/reconcile.go index e2dafca743d..dfa84875cca 100644 --- a/pkg/kubectl/cmd/auth/reconcile.go +++ b/pkg/kubectl/cmd/auth/reconcile.go @@ -66,6 +66,7 @@ var ( kubectl auth reconcile -f my-rbac-rules.yaml`) ) +// NewReconcileOptions returns a new ReconcileOptions instance func NewReconcileOptions(ioStreams genericclioptions.IOStreams) *ReconcileOptions { return &ReconcileOptions{ FilenameOptions: &resource.FilenameOptions{}, @@ -74,6 +75,7 @@ func NewReconcileOptions(ioStreams genericclioptions.IOStreams) *ReconcileOption } } +// NewCmdReconcile holds the options for 'auth reconcile' sub command func NewCmdReconcile(f 
cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewReconcileOptions(streams) @@ -101,6 +103,7 @@ func NewCmdReconcile(f cmdutil.Factory, streams genericclioptions.IOStreams) *co return cmd } +// Complete completes all the required options func (o *ReconcileOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args []string) error { if len(args) > 0 { return errors.New("no arguments are allowed") @@ -149,6 +152,7 @@ func (o *ReconcileOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args return nil } +// Validate makes sure provided values for ReconcileOptions are valid func (o *ReconcileOptions) Validate() error { if o.Visitor == nil { return errors.New("ReconcileOptions.Visitor must be set") @@ -171,6 +175,7 @@ func (o *ReconcileOptions) Validate() error { return nil } +// RunReconcile performs the execution func (o *ReconcileOptions) RunReconcile() error { return o.Visitor.Visit(func(info *resource.Info, err error) error { if err != nil { diff --git a/pkg/kubectl/cmd/autoscale/autoscale.go b/pkg/kubectl/cmd/autoscale/autoscale.go index 9f59e7bbd82..d26a52b5592 100644 --- a/pkg/kubectl/cmd/autoscale/autoscale.go +++ b/pkg/kubectl/cmd/autoscale/autoscale.go @@ -66,7 +66,7 @@ type AutoscaleOptions struct { Generator string Min int32 Max int32 - CpuPercent int32 + CPUPercent int32 createAnnotation bool args []string @@ -120,7 +120,7 @@ func NewCmdAutoscale(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) * cmd.Flags().Int32Var(&o.Min, "min", -1, "The lower limit for the number of pods that can be set by the autoscaler. If it's not specified or negative, the server will apply a default value.") cmd.Flags().Int32Var(&o.Max, "max", -1, "The upper limit for the number of pods that can be set by the autoscaler. Required.") cmd.MarkFlagRequired("max") - cmd.Flags().Int32Var(&o.CpuPercent, "cpu-percent", -1, fmt.Sprintf("The target average CPU utilization (represented as a percent of requested CPU) over all the pods. 
If it's not specified or negative, a default autoscaling policy will be used.")) + cmd.Flags().Int32Var(&o.CPUPercent, "cpu-percent", -1, fmt.Sprintf("The target average CPU utilization (represented as a percent of requested CPU) over all the pods. If it's not specified or negative, a default autoscaling policy will be used.")) cmd.Flags().StringVar(&o.Name, "name", "", i18n.T("The name for the newly created object. If not specified, the name of the input resource will be used.")) cmdutil.AddDryRunFlag(cmd) cmdutil.AddFilenameOptionFlags(cmd, o.FilenameOptions, "identifying the resource to autoscale.") @@ -156,7 +156,7 @@ func (o *AutoscaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args Name: name, MinReplicas: o.Min, MaxReplicas: o.Max, - CPUPercent: o.CpuPercent, + CPUPercent: o.CPUPercent, ScaleRefName: name, ScaleRefKind: mapping.GroupVersionKind.Kind, ScaleRefAPIVersion: mapping.GroupVersionKind.GroupVersion().String(), diff --git a/pkg/kubectl/cmd/certificates/certificates.go b/pkg/kubectl/cmd/certificates/certificates.go index fabc8d60da7..fb537f2dd68 100644 --- a/pkg/kubectl/cmd/certificates/certificates.go +++ b/pkg/kubectl/cmd/certificates/certificates.go @@ -68,6 +68,14 @@ type CertificateOptions struct { genericclioptions.IOStreams } +// NewCertificateOptions creates the options for certificate +func NewCertificateOptions(ioStreams genericclioptions.IOStreams) *CertificateOptions { + return &CertificateOptions{ + PrintFlags: genericclioptions.NewPrintFlags("approved").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + func (o *CertificateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { o.csrNames = args o.outputStyle = cmdutil.GetFlagString(cmd, "output") @@ -103,10 +111,8 @@ func (o *CertificateOptions) Validate() error { } func NewCmdCertificateApprove(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { - options := CertificateOptions{ - PrintFlags: 
genericclioptions.NewPrintFlags("approved").WithTypeSetter(scheme.Scheme), - IOStreams: ioStreams, - } + o := NewCertificateOptions(ioStreams) + cmd := &cobra.Command{ Use: "approve (-f FILENAME | NAME)", DisableFlagsInUseLine: true, @@ -124,16 +130,16 @@ func NewCmdCertificateApprove(f cmdutil.Factory, ioStreams genericclioptions.IOS signed certificate can do. `), Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(options.Complete(f, cmd, args)) - cmdutil.CheckErr(options.Validate()) - cmdutil.CheckErr(options.RunCertificateApprove(cmdutil.GetFlagBool(cmd, "force"))) + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunCertificateApprove(cmdutil.GetFlagBool(cmd, "force"))) }, } - options.PrintFlags.AddFlags(cmd) + o.PrintFlags.AddFlags(cmd) cmd.Flags().Bool("force", false, "Update the CSR even if it is already approved.") - cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, "identifying the resource to update") + cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "identifying the resource to update") return cmd } @@ -160,10 +166,8 @@ func (o *CertificateOptions) RunCertificateApprove(force bool) error { } func NewCmdCertificateDeny(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { - options := CertificateOptions{ - PrintFlags: genericclioptions.NewPrintFlags("denied").WithTypeSetter(scheme.Scheme), - IOStreams: ioStreams, - } + o := NewCertificateOptions(ioStreams) + cmd := &cobra.Command{ Use: "deny (-f FILENAME | NAME)", DisableFlagsInUseLine: true, @@ -176,16 +180,16 @@ func NewCmdCertificateDeny(f cmdutil.Factory, ioStreams genericclioptions.IOStre not to issue a certificate to the requestor. 
`), Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(options.Complete(f, cmd, args)) - cmdutil.CheckErr(options.Validate()) - cmdutil.CheckErr(options.RunCertificateDeny(cmdutil.GetFlagBool(cmd, "force"))) + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunCertificateDeny(cmdutil.GetFlagBool(cmd, "force"))) }, } - options.PrintFlags.AddFlags(cmd) + o.PrintFlags.AddFlags(cmd) cmd.Flags().Bool("force", false, "Update the CSR even if it is already denied.") - cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, "identifying the resource to update") + cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "identifying the resource to update") return cmd } @@ -211,13 +215,13 @@ func (o *CertificateOptions) RunCertificateDeny(force bool) error { }) } -func (options *CertificateOptions) modifyCertificateCondition(builder *resource.Builder, clientSet certificatesv1beta1client.CertificatesV1beta1Interface, force bool, modify func(csr *certificatesv1beta1.CertificateSigningRequest) (*certificatesv1beta1.CertificateSigningRequest, bool)) error { +func (o *CertificateOptions) modifyCertificateCondition(builder *resource.Builder, clientSet certificatesv1beta1client.CertificatesV1beta1Interface, force bool, modify func(csr *certificatesv1beta1.CertificateSigningRequest) (*certificatesv1beta1.CertificateSigningRequest, bool)) error { var found int r := builder. WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). ContinueOnError(). - FilenameParam(false, &options.FilenameOptions). - ResourceNames("certificatesigningrequest", options.csrNames...). + FilenameParam(false, &o.FilenameOptions). + ResourceNames("certificatesigningrequest", o.csrNames...). RequireObject(true). Flatten(). Latest(). @@ -245,10 +249,10 @@ func (options *CertificateOptions) modifyCertificateCondition(builder *resource. 
} found++ - return options.PrintObj(info.Object, options.Out) + return o.PrintObj(info.Object, o.Out) }) if found == 0 { - fmt.Fprintf(options.Out, "No resources found\n") + fmt.Fprintf(o.Out, "No resources found\n") } return err } diff --git a/pkg/kubectl/cmd/clusterinfo/clusterinfo_dump.go b/pkg/kubectl/cmd/clusterinfo/clusterinfo_dump.go index c79e2a972f8..2873a95176a 100644 --- a/pkg/kubectl/cmd/clusterinfo/clusterinfo_dump.go +++ b/pkg/kubectl/cmd/clusterinfo/clusterinfo_dump.go @@ -61,7 +61,6 @@ type ClusterInfoDumpOptions struct { genericclioptions.IOStreams } -// NewCmdCreateSecret groups subcommands to create various types of secrets func NewCmdClusterInfoDump(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { o := &ClusterInfoDumpOptions{ PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme).WithDefaultOutput("json"), diff --git a/pkg/kubectl/cmd/completion/completion.go b/pkg/kubectl/cmd/completion/completion.go index cd8fd36e0f1..855c8306ea6 100644 --- a/pkg/kubectl/cmd/completion/completion.go +++ b/pkg/kubectl/cmd/completion/completion.go @@ -44,7 +44,7 @@ const defaultBoilerPlate = ` ` var ( - completion_long = templates.LongDesc(i18n.T(` + completionLong = templates.LongDesc(i18n.T(` Output shell completion code for the specified shell (bash or zsh). The shell code must be evaluated to provide interactive completion of kubectl commands. 
This can be done by sourcing it from @@ -55,7 +55,7 @@ var ( Note for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2`)) - completion_example = templates.Examples(i18n.T(` + completionExample = templates.Examples(i18n.T(` # Installing bash completion on macOS using homebrew ## If running Bash 3.2 included with macOS brew install bash-completion @@ -86,7 +86,7 @@ var ( ) var ( - completion_shells = map[string]func(out io.Writer, boilerPlate string, cmd *cobra.Command) error{ + completionShells = map[string]func(out io.Writer, boilerPlate string, cmd *cobra.Command) error{ "bash": runCompletionBash, "zsh": runCompletionZsh, } @@ -94,7 +94,7 @@ var ( func NewCmdCompletion(out io.Writer, boilerPlate string) *cobra.Command { shells := []string{} - for s := range completion_shells { + for s := range completionShells { shells = append(shells, s) } @@ -102,8 +102,8 @@ func NewCmdCompletion(out io.Writer, boilerPlate string) *cobra.Command { Use: "completion SHELL", DisableFlagsInUseLine: true, Short: i18n.T("Output shell completion code for the specified shell (bash or zsh)"), - Long: completion_long, - Example: completion_example, + Long: completionLong, + Example: completionExample, Run: func(cmd *cobra.Command, args []string) { err := RunCompletion(out, boilerPlate, cmd, args) cmdutil.CheckErr(err) @@ -121,7 +121,7 @@ func RunCompletion(out io.Writer, boilerPlate string, cmd *cobra.Command, args [ if len(args) > 1 { return cmdutil.UsageErrorf(cmd, "Too many arguments. 
Expected only the shell type.") } - run, found := completion_shells[args[0]] + run, found := completionShells[args[0]] if !found { return cmdutil.UsageErrorf(cmd, "Unsupported shell type %q.", args[0]) } @@ -141,9 +141,9 @@ func runCompletionBash(out io.Writer, boilerPlate string, kubectl *cobra.Command } func runCompletionZsh(out io.Writer, boilerPlate string, kubectl *cobra.Command) error { - zsh_head := "#compdef kubectl\n" + zshHead := "#compdef kubectl\n" - out.Write([]byte(zsh_head)) + out.Write([]byte(zshHead)) if len(boilerPlate) == 0 { boilerPlate = defaultBoilerPlate @@ -152,7 +152,7 @@ func runCompletionZsh(out io.Writer, boilerPlate string, kubectl *cobra.Command) return err } - zsh_initialization := ` + zshInitialization := ` __kubectl_bash_source() { alias shopt=':' alias _expand=_bash_expand @@ -294,19 +294,19 @@ __kubectl_convert_bash_to_zsh() { -e "s/\\\$(type${RWORD}/\$(__kubectl_type/g" \ <<'BASH_COMPLETION_EOF' ` - out.Write([]byte(zsh_initialization)) + out.Write([]byte(zshInitialization)) buf := new(bytes.Buffer) kubectl.GenBashCompletion(buf) out.Write(buf.Bytes()) - zsh_tail := ` + zshTail := ` BASH_COMPLETION_EOF } __kubectl_bash_source <(__kubectl_convert_bash_to_zsh) _complete kubectl 2>/dev/null ` - out.Write([]byte(zsh_tail)) + out.Write([]byte(zshTail)) return nil } diff --git a/pkg/kubectl/cmd/config/config.go b/pkg/kubectl/cmd/config/config.go index 4e1b63e623f..7a977dfd84b 100644 --- a/pkg/kubectl/cmd/config/config.go +++ b/pkg/kubectl/cmd/config/config.go @@ -46,7 +46,7 @@ func NewCmdConfig(f cmdutil.Factory, pathOptions *clientcmd.PathOptions, streams The loading order follows these rules: 1. If the --` + pathOptions.ExplicitFileFlag + ` flag is set, then only that file is loaded. The flag may only be set once and no merging takes place. - 2. If $` + pathOptions.EnvVar + ` environment variable is set, then it is used as a list of paths (normal path delimitting rules for your system). These paths are merged. 
When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last file in the list. + 2. If $` + pathOptions.EnvVar + ` environment variable is set, then it is used as a list of paths (normal path delimiting rules for your system). These paths are merged. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last file in the list. 3. Otherwise, ` + path.Join("${HOME}", pathOptions.GlobalFileSubpath) + ` is used and no merging takes place.`), Run: cmdutil.DefaultSubCommandRun(streams.ErrOut), } @@ -88,5 +88,5 @@ func toBool(propertyValue string) (bool, error) { func helpErrorf(cmd *cobra.Command, format string, args ...interface{}) error { cmd.Help() msg := fmt.Sprintf(format, args...) - return fmt.Errorf("%s\n", msg) + return fmt.Errorf("%s", msg) } diff --git a/pkg/kubectl/cmd/config/create_authinfo.go b/pkg/kubectl/cmd/config/create_authinfo.go index 0dca066c1fd..5ac80dfe746 100644 --- a/pkg/kubectl/cmd/config/create_authinfo.go +++ b/pkg/kubectl/cmd/config/create_authinfo.go @@ -56,7 +56,7 @@ const ( ) var ( - create_authinfo_long = fmt.Sprintf(templates.LongDesc(` + createAuthInfoLong = fmt.Sprintf(templates.LongDesc(` Sets a user entry in kubeconfig Specifying a name that already exists will merge new fields on top of existing values. 
@@ -72,7 +72,7 @@ var ( Bearer token and basic auth are mutually exclusive.`), clientcmd.FlagCertFile, clientcmd.FlagKeyFile, clientcmd.FlagBearerToken, clientcmd.FlagUsername, clientcmd.FlagPassword) - create_authinfo_example = templates.Examples(` + createAuthInfoExample = templates.Examples(` # Set only the "client-key" field on the "cluster-admin" # entry, without touching other values: kubectl config set-credentials cluster-admin --client-key=~/.kube/admin.key @@ -93,6 +93,7 @@ var ( kubectl config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-secret-`) ) +// NewCmdConfigSetAuthInfo returns an Command option instance for 'config set-credentials' sub command func NewCmdConfigSetAuthInfo(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &createAuthInfoOptions{configAccess: configAccess} return newCmdConfigSetAuthInfo(out, options) @@ -103,8 +104,8 @@ func newCmdConfigSetAuthInfo(out io.Writer, options *createAuthInfoOptions) *cob Use: fmt.Sprintf("set-credentials NAME [--%v=path/to/certfile] [--%v=path/to/keyfile] [--%v=bearer_token] [--%v=basic_user] [--%v=basic_password] [--%v=provider_name] [--%v=key=value]", clientcmd.FlagCertFile, clientcmd.FlagKeyFile, clientcmd.FlagBearerToken, clientcmd.FlagUsername, clientcmd.FlagPassword, flagAuthProvider, flagAuthProviderArg), DisableFlagsInUseLine: true, Short: i18n.T("Sets a user entry in kubeconfig"), - Long: create_authinfo_long, - Example: create_authinfo_example, + Long: createAuthInfoLong, + Example: createAuthInfoExample, Run: func(cmd *cobra.Command, args []string) { err := options.complete(cmd, out) if err != nil { @@ -247,13 +248,13 @@ func (o *createAuthInfoOptions) complete(cmd *cobra.Command, out io.Writer) erro authProviderArgs, err := cmd.Flags().GetStringSlice(flagAuthProviderArg) if err != nil { - return fmt.Errorf("Error: %s\n", err) + return fmt.Errorf("Error: %s", err) } if len(authProviderArgs) > 0 { newPairs, removePairs, err := 
cmdutil.ParsePairs(authProviderArgs, flagAuthProviderArg, true) if err != nil { - return fmt.Errorf("Error: %s\n", err) + return fmt.Errorf("Error: %s", err) } o.authProviderArgs = newPairs o.authProviderArgsToRemove = removePairs diff --git a/pkg/kubectl/cmd/config/create_cluster.go b/pkg/kubectl/cmd/config/create_cluster.go index a07dfd6c772..81d0560a98a 100644 --- a/pkg/kubectl/cmd/config/create_cluster.go +++ b/pkg/kubectl/cmd/config/create_cluster.go @@ -43,12 +43,12 @@ type createClusterOptions struct { } var ( - create_cluster_long = templates.LongDesc(` + createClusterLong = templates.LongDesc(` Sets a cluster entry in kubeconfig. Specifying a name that already exists will merge new fields on top of existing values for those fields.`) - create_cluster_example = templates.Examples(` + createClusterExample = templates.Examples(` # Set only the server field on the e2e cluster entry without touching other values. kubectl config set-cluster e2e --server=https://1.2.3.4 @@ -59,6 +59,7 @@ var ( kubectl config set-cluster e2e --insecure-skip-tls-verify=true`) ) +// NewCmdConfigSetCluster returns a Command instance for 'config set-cluster' sub command func NewCmdConfigSetCluster(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &createClusterOptions{configAccess: configAccess} @@ -66,8 +67,8 @@ func NewCmdConfigSetCluster(out io.Writer, configAccess clientcmd.ConfigAccess) Use: fmt.Sprintf("set-cluster NAME [--%v=server] [--%v=path/to/certificate/authority] [--%v=true]", clientcmd.FlagAPIServer, clientcmd.FlagCAFile, clientcmd.FlagInsecure), DisableFlagsInUseLine: true, Short: i18n.T("Sets a cluster entry in kubeconfig"), - Long: create_cluster_long, - Example: create_cluster_example, + Long: createClusterLong, + Example: createClusterExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.complete(cmd)) cmdutil.CheckErr(options.run()) diff --git a/pkg/kubectl/cmd/config/create_context.go 
b/pkg/kubectl/cmd/config/create_context.go index 50b965cc629..59af1712e4a 100644 --- a/pkg/kubectl/cmd/config/create_context.go +++ b/pkg/kubectl/cmd/config/create_context.go @@ -41,16 +41,17 @@ type createContextOptions struct { } var ( - create_context_long = templates.LongDesc(` + createContextLong = templates.LongDesc(` Sets a context entry in kubeconfig Specifying a name that already exists will merge new fields on top of existing values for those fields.`) - create_context_example = templates.Examples(` + createContextExample = templates.Examples(` # Set the user field on the gce context entry without touching other values kubectl config set-context gce --user=cluster-admin`) ) +// NewCmdConfigSetContext returns a Command instance for 'config set-context' sub command func NewCmdConfigSetContext(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &createContextOptions{configAccess: configAccess} @@ -58,8 +59,8 @@ func NewCmdConfigSetContext(out io.Writer, configAccess clientcmd.ConfigAccess) Use: fmt.Sprintf("set-context [NAME | --current] [--%v=cluster_nickname] [--%v=user_nickname] [--%v=namespace]", clientcmd.FlagClusterName, clientcmd.FlagAuthInfoName, clientcmd.FlagNamespace), DisableFlagsInUseLine: true, Short: i18n.T("Sets a context entry in kubeconfig"), - Long: create_context_long, - Example: create_context_example, + Long: createContextLong, + Example: createContextExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.complete(cmd)) name, exists, err := options.run() diff --git a/pkg/kubectl/cmd/config/current_context.go b/pkg/kubectl/cmd/config/current_context.go index 3751d8b25c9..83f855cffdc 100644 --- a/pkg/kubectl/cmd/config/current_context.go +++ b/pkg/kubectl/cmd/config/current_context.go @@ -28,27 +28,29 @@ import ( "k8s.io/kubernetes/pkg/kubectl/util/templates" ) +// CurrentContextOptions holds the command-line options for 'config current-context' sub command type CurrentContextOptions 
struct { ConfigAccess clientcmd.ConfigAccess } var ( - current_context_long = templates.LongDesc(` + currentContextLong = templates.LongDesc(` Displays the current-context`) - current_context_example = templates.Examples(` + currentContextExample = templates.Examples(` # Display the current-context kubectl config current-context`) ) +// NewCmdConfigCurrentContext returns a Command instance for 'config current-context' sub command func NewCmdConfigCurrentContext(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &CurrentContextOptions{ConfigAccess: configAccess} cmd := &cobra.Command{ Use: "current-context", Short: i18n.T("Displays the current-context"), - Long: current_context_long, - Example: current_context_example, + Long: currentContextLong, + Example: currentContextExample, Run: func(cmd *cobra.Command, args []string) { err := RunCurrentContext(out, options) cmdutil.CheckErr(err) @@ -58,6 +60,7 @@ func NewCmdConfigCurrentContext(out io.Writer, configAccess clientcmd.ConfigAcce return cmd } +// RunCurrentContext performs the execution of 'config current-context' sub command func RunCurrentContext(out io.Writer, options *CurrentContextOptions) error { config, err := options.ConfigAccess.GetStartingConfig() if err != nil { @@ -65,7 +68,7 @@ func RunCurrentContext(out io.Writer, options *CurrentContextOptions) error { } if config.CurrentContext == "" { - err = fmt.Errorf("current-context is not set\n") + err = fmt.Errorf("current-context is not set") return err } diff --git a/pkg/kubectl/cmd/config/delete_cluster.go b/pkg/kubectl/cmd/config/delete_cluster.go index 9ae52e87c11..90a5f82dd8a 100644 --- a/pkg/kubectl/cmd/config/delete_cluster.go +++ b/pkg/kubectl/cmd/config/delete_cluster.go @@ -28,18 +28,19 @@ import ( ) var ( - delete_cluster_example = templates.Examples(` + deleteClusterExample = templates.Examples(` # Delete the minikube cluster kubectl config delete-cluster minikube`) ) +// NewCmdConfigDeleteCluster returns a Command 
instance for 'config delete-cluster' sub command func NewCmdConfigDeleteCluster(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { cmd := &cobra.Command{ Use: "delete-cluster NAME", DisableFlagsInUseLine: true, Short: i18n.T("Delete the specified cluster from the kubeconfig"), Long: "Delete the specified cluster from the kubeconfig", - Example: delete_cluster_example, + Example: deleteClusterExample, Run: func(cmd *cobra.Command, args []string) { err := runDeleteCluster(out, configAccess, cmd) cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/config/delete_context.go b/pkg/kubectl/cmd/config/delete_context.go index 76ae529fd7e..6ce0d72d257 100644 --- a/pkg/kubectl/cmd/config/delete_context.go +++ b/pkg/kubectl/cmd/config/delete_context.go @@ -28,18 +28,19 @@ import ( ) var ( - delete_context_example = templates.Examples(` + deleteContextExample = templates.Examples(` # Delete the context for the minikube cluster kubectl config delete-context minikube`) ) +// NewCmdConfigDeleteContext returns a Command instance for 'config delete-context' sub command func NewCmdConfigDeleteContext(out, errOut io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { cmd := &cobra.Command{ Use: "delete-context NAME", DisableFlagsInUseLine: true, Short: i18n.T("Delete the specified context from the kubeconfig"), Long: "Delete the specified context from the kubeconfig", - Example: delete_context_example, + Example: deleteContextExample, Run: func(cmd *cobra.Command, args []string) { err := runDeleteContext(out, errOut, configAccess, cmd) cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/config/get_clusters.go b/pkg/kubectl/cmd/config/get_clusters.go index bbaebf9e7a7..83474d0e217 100644 --- a/pkg/kubectl/cmd/config/get_clusters.go +++ b/pkg/kubectl/cmd/config/get_clusters.go @@ -28,7 +28,7 @@ import ( ) var ( - get_clusters_example = templates.Examples(` + getClustersExample = templates.Examples(` # List the clusters kubectl knows about kubectl config 
get-clusters`) ) @@ -40,7 +40,7 @@ func NewCmdConfigGetClusters(out io.Writer, configAccess clientcmd.ConfigAccess) Use: "get-clusters", Short: i18n.T("Display clusters defined in the kubeconfig"), Long: "Display clusters defined in the kubeconfig.", - Example: get_clusters_example, + Example: getClustersExample, Run: func(cmd *cobra.Command, args []string) { err := runGetClusters(out, configAccess) cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/config/rename_context.go b/pkg/kubectl/cmd/config/rename_context.go index 535a811102e..c87be1e6fa9 100644 --- a/pkg/kubectl/cmd/config/rename_context.go +++ b/pkg/kubectl/cmd/config/rename_context.go @@ -92,6 +92,7 @@ func (o *RenameContextOptions) Complete(cmd *cobra.Command, args []string, out i return nil } +// Validate makes sure that provided values for command-line options are valid func (o RenameContextOptions) Validate() error { if len(o.newName) == 0 { return errors.New("You must specify a new non-empty context name") @@ -99,6 +100,7 @@ func (o RenameContextOptions) Validate() error { return nil } +// RunRenameContext performs the execution for 'config rename-context' sub command func (o RenameContextOptions) RunRenameContext(out io.Writer) error { config, err := o.configAccess.GetStartingConfig() if err != nil { diff --git a/pkg/kubectl/cmd/config/rename_context_test.go b/pkg/kubectl/cmd/config/rename_context_test.go index cfe371497f1..01edde9fe76 100644 --- a/pkg/kubectl/cmd/config/rename_context_test.go +++ b/pkg/kubectl/cmd/config/rename_context_test.go @@ -36,7 +36,7 @@ const ( ) var ( - contextData *clientcmdapi.Context = clientcmdapi.NewContext() + contextData = clientcmdapi.NewContext() ) type renameContextTest struct { diff --git a/pkg/kubectl/cmd/config/set.go b/pkg/kubectl/cmd/config/set.go index 9117402bba1..2b5a37a27f4 100644 --- a/pkg/kubectl/cmd/config/set.go +++ b/pkg/kubectl/cmd/config/set.go @@ -40,13 +40,14 @@ type setOptions struct { setRawBytes flag.Tristate } -var set_long = 
templates.LongDesc(` +var setLong = templates.LongDesc(` Sets an individual value in a kubeconfig file PROPERTY_NAME is a dot delimited name where each token represents either an attribute name or a map key. Map keys may not contain dots. PROPERTY_VALUE is the new value you wish to set. Binary fields such as 'certificate-authority-data' expect a base64 encoded string unless the --set-raw-bytes flag is used.`) +// NewCmdConfigSet returns a Command instance for 'config set' sub command func NewCmdConfigSet(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &setOptions{configAccess: configAccess} @@ -54,7 +55,7 @@ func NewCmdConfigSet(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra. Use: "set PROPERTY_NAME PROPERTY_VALUE", DisableFlagsInUseLine: true, Short: i18n.T("Sets an individual value in a kubeconfig file"), - Long: set_long, + Long: setLong, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.complete(cmd)) cmdutil.CheckErr(options.run()) diff --git a/pkg/kubectl/cmd/config/unset.go b/pkg/kubectl/cmd/config/unset.go index 02a519cb42c..838957831c2 100644 --- a/pkg/kubectl/cmd/config/unset.go +++ b/pkg/kubectl/cmd/config/unset.go @@ -49,6 +49,7 @@ var ( kubectl config unset contexts.foo.namespace`) ) +// NewCmdConfigUnset returns a Command instance for 'config unset' sub command func NewCmdConfigUnset(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &unsetOptions{configAccess: configAccess} diff --git a/pkg/kubectl/cmd/config/use_context.go b/pkg/kubectl/cmd/config/use_context.go index b22251af261..0ff5a8e2536 100644 --- a/pkg/kubectl/cmd/config/use_context.go +++ b/pkg/kubectl/cmd/config/use_context.go @@ -31,7 +31,7 @@ import ( ) var ( - use_context_example = templates.Examples(` + useContextExample = templates.Examples(` # Use the context for the minikube cluster kubectl config use-context minikube`) ) @@ -41,6 +41,7 @@ type useContextOptions struct { contextName string 
} +// NewCmdConfigUseContext returns a Command instance for 'config use-context' sub command func NewCmdConfigUseContext(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &useContextOptions{configAccess: configAccess} @@ -50,7 +51,7 @@ func NewCmdConfigUseContext(out io.Writer, configAccess clientcmd.ConfigAccess) Short: i18n.T("Sets the current-context in a kubeconfig file"), Aliases: []string{"use"}, Long: `Sets the current-context in a kubeconfig file`, - Example: use_context_example, + Example: useContextExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.complete(cmd)) cmdutil.CheckErr(options.run()) @@ -98,5 +99,5 @@ func (o useContextOptions) validate(config *clientcmdapi.Config) error { } } - return fmt.Errorf("no context exists with the name: %q.", o.contextName) + return fmt.Errorf("no context exists with the name: %q", o.contextName) } diff --git a/pkg/kubectl/cmd/config/view.go b/pkg/kubectl/cmd/config/view.go index 7017cb6a9cf..4df1f9a2824 100644 --- a/pkg/kubectl/cmd/config/view.go +++ b/pkg/kubectl/cmd/config/view.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/util/templates" ) +// ViewOptions holds the command-line options for 'config view' sub command type ViewOptions struct { PrintFlags *genericclioptions.PrintFlags PrintObject printers.ResourcePrinterFunc @@ -50,12 +51,12 @@ type ViewOptions struct { } var ( - view_long = templates.LongDesc(` + viewLong = templates.LongDesc(` Display merged kubeconfig settings or a specified kubeconfig file. You can use --output jsonpath={...} to extract specific values using a jsonpath expression.`) - view_example = templates.Examples(` + viewExample = templates.Examples(` # Show merged kubeconfig settings. 
kubectl config view @@ -68,6 +69,7 @@ var ( defaultOutputFormat = "yaml" ) +// NewCmdConfigView returns a Command instance for 'config view' sub command func NewCmdConfigView(f cmdutil.Factory, streams genericclioptions.IOStreams, ConfigAccess clientcmd.ConfigAccess) *cobra.Command { o := &ViewOptions{ PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme).WithDefaultOutput("yaml"), @@ -79,8 +81,8 @@ func NewCmdConfigView(f cmdutil.Factory, streams genericclioptions.IOStreams, Co cmd := &cobra.Command{ Use: "view", Short: i18n.T("Display merged kubeconfig settings or a specified kubeconfig file"), - Long: view_long, - Example: view_example, + Long: viewLong, + Example: viewExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -99,6 +101,7 @@ func NewCmdConfigView(f cmdutil.Factory, streams genericclioptions.IOStreams, Co return cmd } +// Complete completes the required command-line options func (o *ViewOptions) Complete(cmd *cobra.Command, args []string) error { if len(args) != 0 { return cmdutil.UsageErrorf(cmd, "unexpected arguments: %v", args) @@ -119,6 +122,7 @@ func (o *ViewOptions) Complete(cmd *cobra.Command, args []string) error { return nil } +// Validate makes sure that provided values for command-line options are valid func (o ViewOptions) Validate() error { if !o.Merge.Value() && !o.ConfigAccess.IsExplicitFile() { return errors.New("if merge==false a precise file must to specified") @@ -127,6 +131,7 @@ func (o ViewOptions) Validate() error { return nil } +// Run performs the execution of 'config view' sub command func (o ViewOptions) Run() error { config, err := o.loadConfig() if err != nil { diff --git a/pkg/kubectl/cmd/convert/convert.go b/pkg/kubectl/cmd/convert/convert.go index a233b8cd1cb..506b7365774 100644 --- a/pkg/kubectl/cmd/convert/convert.go +++ b/pkg/kubectl/cmd/convert/convert.go @@ -37,7 +37,7 @@ import ( ) var ( - convert_long = 
templates.LongDesc(i18n.T(` + convertLong = templates.LongDesc(i18n.T(` Convert config files between different API versions. Both YAML and JSON formats are accepted. @@ -48,7 +48,7 @@ var ( The default output will be printed to stdout in YAML format. One can use -o option to change to output destination.`)) - convert_example = templates.Examples(i18n.T(` + convertExample = templates.Examples(i18n.T(` # Convert 'pod.yaml' to latest version and print to stdout. kubectl convert -f pod.yaml @@ -93,8 +93,8 @@ func NewCmdConvert(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *co Use: "convert -f FILENAME", DisableFlagsInUseLine: true, Short: i18n.T("Convert config files between different API versions"), - Long: convert_long, - Example: convert_example, + Long: convertLong, + Example: convertExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd)) cmdutil.CheckErr(o.RunConvert()) diff --git a/pkg/kubectl/cmd/cp/cp.go b/pkg/kubectl/cmd/cp/cp.go index a3b62fdeebd..93366db0aea 100644 --- a/pkg/kubectl/cmd/cp/cp.go +++ b/pkg/kubectl/cmd/cp/cp.go @@ -315,8 +315,8 @@ func stripPathShortcuts(p string) string { trimmed = strings.TrimPrefix(newPath, "../") } - // trim leftover ".." - if newPath == ".." { + // trim leftover {".", ".."} + if newPath == "." || newPath == ".." 
{ newPath = "" } diff --git a/pkg/kubectl/cmd/cp/cp_test.go b/pkg/kubectl/cmd/cp/cp_test.go index 1764f8a55b3..b8a14dfc540 100644 --- a/pkg/kubectl/cmd/cp/cp_test.go +++ b/pkg/kubectl/cmd/cp/cp_test.go @@ -175,6 +175,11 @@ func TestStripPathShortcuts(t *testing.T) { input: "...foo", expected: "...foo", }, + { + name: "test root directory", + input: "/", + expected: "", + }, } for _, test := range tests { diff --git a/pkg/kubectl/cmd/create/create.go b/pkg/kubectl/cmd/create/create.go index 1a426bae616..1a8f230bc2d 100644 --- a/pkg/kubectl/cmd/create/create.go +++ b/pkg/kubectl/cmd/create/create.go @@ -45,6 +45,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/util/templates" ) +// CreateOptions is the commandline options for 'create' sub command type CreateOptions struct { PrintFlags *genericclioptions.PrintFlags RecordFlags *genericclioptions.RecordFlags @@ -79,6 +80,7 @@ var ( kubectl create -f docker-registry.yaml --edit -o json`)) ) +// NewCreateOptions returns an initialized CreateOptions instance func NewCreateOptions(ioStreams genericclioptions.IOStreams) *CreateOptions { return &CreateOptions{ PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), @@ -90,6 +92,7 @@ func NewCreateOptions(ioStreams genericclioptions.IOStreams) *CreateOptions { } } +// NewCmdCreate returns new initialized instance of create sub command func NewCmdCreate(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { o := NewCreateOptions(ioStreams) @@ -146,6 +149,7 @@ func NewCmdCreate(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cob return cmd } +// ValidateArgs makes sure there is no discrepency in command options func (o *CreateOptions) ValidateArgs(cmd *cobra.Command, args []string) error { if len(args) != 0 { return cmdutil.UsageErrorf(cmd, "Unexpected args: %v", args) @@ -177,6 +181,7 @@ func (o *CreateOptions) ValidateArgs(cmd *cobra.Command, args []string) error { return nil } +// Complete completes all the required 
options func (o *CreateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { var err error @@ -203,6 +208,7 @@ func (o *CreateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return nil } +// RunCreate performs the creation func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error { // raw only makes sense for a single file resource multiple objects aren't likely to do what you want. // the validator enforces this, so @@ -300,6 +306,7 @@ func (o *CreateOptions) raw(f cmdutil.Factory) error { return nil } +// RunEditOnCreate performs edit on creation func RunEditOnCreate(f cmdutil.Factory, printFlags *genericclioptions.PrintFlags, recordFlags *genericclioptions.RecordFlags, ioStreams genericclioptions.IOStreams, cmd *cobra.Command, options *resource.FilenameOptions) error { editOptions := editor.NewEditOptions(editor.EditBeforeCreateMode, ioStreams) editOptions.FilenameOptions = *options @@ -363,6 +370,7 @@ type CreateSubcommandOptions struct { genericclioptions.IOStreams } +// NewCreateSubcommandOptions returns initialized CreateSubcommandOptions func NewCreateSubcommandOptions(ioStreams genericclioptions.IOStreams) *CreateSubcommandOptions { return &CreateSubcommandOptions{ PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), @@ -370,6 +378,7 @@ func NewCreateSubcommandOptions(ioStreams genericclioptions.IOStreams) *CreateSu } } +// Complete completes all the required options func (o *CreateSubcommandOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string, generator generate.StructuredGenerator) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -411,7 +420,7 @@ func (o *CreateSubcommandOptions) Complete(f cmdutil.Factory, cmd *cobra.Command return nil } -// RunCreateSubcommand executes a create subcommand using the specified options +// Run executes a create subcommand using the specified options func (o *CreateSubcommandOptions) Run() error { obj, 
err := o.StructuredGenerator.StructuredGenerate() if err != nil { diff --git a/pkg/kubectl/cmd/create/create_clusterrole.go b/pkg/kubectl/cmd/create/create_clusterrole.go index ce170028275..276ae7f5891 100644 --- a/pkg/kubectl/cmd/create/create_clusterrole.go +++ b/pkg/kubectl/cmd/create/create_clusterrole.go @@ -58,13 +58,14 @@ var ( validNonResourceVerbs = []string{"*", "get", "post", "put", "delete", "patch", "head", "options"} ) +// CreateClusterRoleOptions is returned by NewCmdCreateClusterRole type CreateClusterRoleOptions struct { *CreateRoleOptions NonResourceURLs []string AggregationRule map[string]string } -// ClusterRole is a command to ease creating ClusterRoles. +// NewCmdCreateClusterRole initializes and returns new ClusterRoles command func NewCmdCreateClusterRole(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { c := &CreateClusterRoleOptions{ CreateRoleOptions: NewCreateRoleOptions(ioStreams), @@ -97,6 +98,7 @@ func NewCmdCreateClusterRole(f cmdutil.Factory, ioStreams genericclioptions.IOSt return cmd } +// Complete completes all the required options func (c *CreateClusterRoleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { // Remove duplicate nonResourceURLs nonResourceURLs := []string{} @@ -110,6 +112,7 @@ func (c *CreateClusterRoleOptions) Complete(f cmdutil.Factory, cmd *cobra.Comman return c.CreateRoleOptions.Complete(f, cmd, args) } +// Validate makes sure there is no discrepency in CreateClusterRoleOptions func (c *CreateClusterRoleOptions) Validate() error { if c.Name == "" { return fmt.Errorf("name must be specified") @@ -170,6 +173,7 @@ func (c *CreateClusterRoleOptions) Validate() error { } +// RunCreateRole creates a new clusterRole func (c *CreateClusterRoleOptions) RunCreateRole() error { clusterRole := &rbacv1.ClusterRole{ // this is ok because we know exactly how we want to be serialized diff --git a/pkg/kubectl/cmd/create/create_clusterrolebinding.go 
b/pkg/kubectl/cmd/create/create_clusterrolebinding.go index 882da888cc3..acff50b6eff 100644 --- a/pkg/kubectl/cmd/create/create_clusterrolebinding.go +++ b/pkg/kubectl/cmd/create/create_clusterrolebinding.go @@ -36,13 +36,14 @@ var ( kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-admin --user=user1 --user=user2 --group=group1`)) ) +// ClusterRoleBindingOpts is returned by NewCmdCreateClusterRoleBinding type ClusterRoleBindingOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } -// ClusterRoleBinding is a command to ease creating ClusterRoleBindings. +// NewCmdCreateClusterRoleBinding returns an initialized command instance of ClusterRoleBinding func NewCmdCreateClusterRoleBinding(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { - options := &ClusterRoleBindingOpts{ + o := &ClusterRoleBindingOpts{ CreateSubcommandOptions: NewCreateSubcommandOptions(ioStreams), } @@ -53,12 +54,12 @@ func NewCmdCreateClusterRoleBinding(f cmdutil.Factory, ioStreams genericclioptio Long: clusterRoleBindingLong, Example: clusterRoleBindingExample, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(options.Complete(f, cmd, args)) - cmdutil.CheckErr(options.Run()) + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Run()) }, } - options.CreateSubcommandOptions.PrintFlags.AddFlags(cmd) + o.CreateSubcommandOptions.PrintFlags.AddFlags(cmd) cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) @@ -71,6 +72,7 @@ func NewCmdCreateClusterRoleBinding(f cmdutil.Factory, ioStreams genericclioptio return cmd } +// Complete completes all the required options func (o *ClusterRoleBindingOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -94,7 +96,7 @@ func (o *ClusterRoleBindingOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// 
CreateClusterRoleBinding is the implementation of the create clusterrolebinding command. +// Run calls the CreateSubcommandOptions.Run in ClusterRoleBindingOpts instance func (o *ClusterRoleBindingOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git a/pkg/kubectl/cmd/create/create_configmap.go b/pkg/kubectl/cmd/create/create_configmap.go index 4837d658f9e..63ea712300c 100644 --- a/pkg/kubectl/cmd/create/create_configmap.go +++ b/pkg/kubectl/cmd/create/create_configmap.go @@ -57,11 +57,12 @@ var ( kubectl create configmap my-config --from-env-file=path/to/bar.env`)) ) +// ConfigMapOpts holds properties for create configmap sub-command type ConfigMapOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } -// ConfigMap is a command to ease creating ConfigMaps. +// NewCmdCreateConfigMap initializes and returns ConfigMapOpts func NewCmdCreateConfigMap(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { options := &ConfigMapOpts{ CreateSubcommandOptions: NewCreateSubcommandOptions(ioStreams), @@ -92,6 +93,7 @@ func NewCmdCreateConfigMap(f cmdutil.Factory, ioStreams genericclioptions.IOStre return cmd } +// Complete completes all the required options func (o *ConfigMapOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -115,7 +117,7 @@ func (o *ConfigMapOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateConfigMap is the implementation of the create configmap command. 
+// Run performs the execution of 'create' sub command options func (o *ConfigMapOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git a/pkg/kubectl/cmd/create/create_deployment.go b/pkg/kubectl/cmd/create/create_deployment.go index 1a069f73ff2..ed98b790d03 100644 --- a/pkg/kubectl/cmd/create/create_deployment.go +++ b/pkg/kubectl/cmd/create/create_deployment.go @@ -36,6 +36,7 @@ var ( kubectl create deployment my-dep --image=busybox`)) ) +// DeploymentOpts is returned by NewCmdCreateDeployment type DeploymentOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -112,6 +113,7 @@ func generatorFromName( return nil, false } +// Complete completes all the options func (o *DeploymentOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -147,10 +149,7 @@ func (o *DeploymentOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args [] return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// createDeployment -// 1. Reads user config values from Cobra. -// 2. Sets up the correct Generator object. -// 3. Calls RunCreateSubcommand. 
+// Run performs the execution of 'create deployment' sub command func (o *DeploymentOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git a/pkg/kubectl/cmd/create/create_job.go b/pkg/kubectl/cmd/create/create_job.go index a82348e1b8c..285e239fc2b 100644 --- a/pkg/kubectl/cmd/create/create_job.go +++ b/pkg/kubectl/cmd/create/create_job.go @@ -50,6 +50,7 @@ var ( kubectl create job test-job --from=cronjob/a-cronjob`)) ) +// CreateJobOptions is the command line options for 'create job' type CreateJobOptions struct { PrintFlags *genericclioptions.PrintFlags @@ -69,6 +70,7 @@ type CreateJobOptions struct { genericclioptions.IOStreams } +// NewCreateJobOptions initializes and returns new CreateJobOptions instance func NewCreateJobOptions(ioStreams genericclioptions.IOStreams) *CreateJobOptions { return &CreateJobOptions{ PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), @@ -102,6 +104,7 @@ func NewCmdCreateJob(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) * return cmd } +// Complete completes all the required options func (o *CreateJobOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -143,6 +146,7 @@ func (o *CreateJobOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args return nil } +// Validate makes sure provided values and valid Job options func (o *CreateJobOptions) Validate() error { if (len(o.Image) == 0 && len(o.From) == 0) || (len(o.Image) != 0 && len(o.From) != 0) { return fmt.Errorf("either --image or --from must be specified") @@ -153,6 +157,7 @@ func (o *CreateJobOptions) Validate() error { return nil } +// Run performs the execution of 'create job' sub command func (o *CreateJobOptions) Run() error { var job *batchv1.Job if len(o.Image) > 0 { diff --git a/pkg/kubectl/cmd/create/create_namespace.go b/pkg/kubectl/cmd/create/create_namespace.go index 04ea190cf8a..1a329efd163 100644 --- 
a/pkg/kubectl/cmd/create/create_namespace.go +++ b/pkg/kubectl/cmd/create/create_namespace.go @@ -36,6 +36,7 @@ var ( kubectl create namespace my-namespace`)) ) +// NamespaceOpts is the options for 'create namespare' sub command type NamespaceOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -68,6 +69,7 @@ func NewCmdCreateNamespace(f cmdutil.Factory, ioStreams genericclioptions.IOStre return cmd } +// Complete completes all the required options func (o *NamespaceOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -85,7 +87,7 @@ func (o *NamespaceOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateNamespace implements the behavior to run the create namespace command +// Run calls the CreateSubcommandOptions.Run in NamespaceOpts instance func (o *NamespaceOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git a/pkg/kubectl/cmd/create/create_pdb.go b/pkg/kubectl/cmd/create/create_pdb.go index 7a0f5b91af5..b085509bbc4 100644 --- a/pkg/kubectl/cmd/create/create_pdb.go +++ b/pkg/kubectl/cmd/create/create_pdb.go @@ -41,6 +41,7 @@ var ( kubectl create pdb my-pdb --selector=app=nginx --min-available=50%`)) ) +// PodDisruptionBudgetOpts holds the command-line options for poddisruptionbudget sub command type PodDisruptionBudgetOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -76,6 +77,7 @@ func NewCmdCreatePodDisruptionBudget(f cmdutil.Factory, ioStreams genericcliopti return cmd } +// Complete completes all the required options func (o *PodDisruptionBudgetOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -104,7 +106,7 @@ func (o *PodDisruptionBudgetOpts) Complete(f cmdutil.Factory, cmd *cobra.Command return o.CreateSubcommandOptions.Complete(f, cmd, args, 
generator) } -// CreatePodDisruptionBudget implements the behavior to run the create pdb command. +// Run calls the CreateSubcommandOptions.Run in PodDisruptionBudgetOpts instance func (o *PodDisruptionBudgetOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git a/pkg/kubectl/cmd/create/create_priorityclass.go b/pkg/kubectl/cmd/create/create_priorityclass.go index 5cf7f529204..7555593763c 100644 --- a/pkg/kubectl/cmd/create/create_priorityclass.go +++ b/pkg/kubectl/cmd/create/create_priorityclass.go @@ -39,6 +39,7 @@ var ( kubectl create priorityclass default-priority --value=1000 --global-default=true --description="default priority"`)) ) +// PriorityClassOpts holds the options for 'create priorityclass' sub command type PriorityClassOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -74,6 +75,7 @@ func NewCmdCreatePriorityClass(f cmdutil.Factory, ioStreams genericclioptions.IO return cmd } +// Complete completes all the required options func (o *PriorityClassOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -96,7 +98,7 @@ func (o *PriorityClassOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreatePriorityClass implements the behavior to run the create priorityClass command. 
+// Run calls the CreateSubcommandOptions.Run in the PriorityClassOpts instance func (o *PriorityClassOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git a/pkg/kubectl/cmd/create/create_quota.go b/pkg/kubectl/cmd/create/create_quota.go index 95676a18303..31c83c72f16 100644 --- a/pkg/kubectl/cmd/create/create_quota.go +++ b/pkg/kubectl/cmd/create/create_quota.go @@ -39,6 +39,7 @@ var ( kubectl create quota best-effort --hard=pods=100 --scopes=BestEffort`)) ) +// QuotaOpts holds the command-line options for 'create quota' sub command type QuotaOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -72,6 +73,7 @@ func NewCmdCreateQuota(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) return cmd } +// Complete completes all the required options func (o *QuotaOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -93,7 +95,7 @@ func (o *QuotaOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []strin return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateQuota implements the behavior to run the create quota command +// Run calls the CreateSubcommandOptions.Run in QuotaOpts instance func (o *QuotaOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git a/pkg/kubectl/cmd/create/create_role.go b/pkg/kubectl/cmd/create/create_role.go index 7124c509e80..bb183438d94 100644 --- a/pkg/kubectl/cmd/create/create_role.go +++ b/pkg/kubectl/cmd/create/create_role.go @@ -105,12 +105,14 @@ var ( } ) +// ResourceOptions holds the related options for '--resource' option type ResourceOptions struct { Group string Resource string SubResource string } +// CreateRoleOptions holds the options for 'create role' sub command type CreateRoleOptions struct { PrintFlags *genericclioptions.PrintFlags @@ -129,6 +131,7 @@ type CreateRoleOptions struct { genericclioptions.IOStreams } +// NewCreateRoleOptions returns an initialized 
CreateRoleOptions instance func NewCreateRoleOptions(ioStreams genericclioptions.IOStreams) *CreateRoleOptions { return &CreateRoleOptions{ PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), @@ -137,7 +140,7 @@ func NewCreateRoleOptions(ioStreams genericclioptions.IOStreams) *CreateRoleOpti } } -// Role is a command to ease creating Roles. +// NewCmdCreateRole returnns an initialized Command instance for 'create role' sub command func NewCmdCreateRole(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { o := NewCreateRoleOptions(ioStreams) @@ -166,6 +169,7 @@ func NewCmdCreateRole(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) return cmd } +// Complete completes all the required options func (o *CreateRoleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -255,6 +259,7 @@ func (o *CreateRoleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args return nil } +// Validate makes sure there is no discrepency in provided option values func (o *CreateRoleOptions) Validate() error { if o.Name == "" { return fmt.Errorf("name must be specified") @@ -317,6 +322,7 @@ func (o *CreateRoleOptions) validateResource() error { return nil } +// RunCreateRole performs the execution of 'create role' sub command func (o *CreateRoleOptions) RunCreateRole() error { role := &rbacv1.Role{ // this is ok because we know exactly how we want to be serialized diff --git a/pkg/kubectl/cmd/create/create_rolebinding.go b/pkg/kubectl/cmd/create/create_rolebinding.go index 4b8f2b28ed6..605909d14c6 100644 --- a/pkg/kubectl/cmd/create/create_rolebinding.go +++ b/pkg/kubectl/cmd/create/create_rolebinding.go @@ -36,13 +36,14 @@ var ( kubectl create rolebinding admin --clusterrole=admin --user=user1 --user=user2 --group=group1`)) ) +// RoleBindingOpts holds the options for 'create rolebinding' sub command type RoleBindingOpts struct { 
CreateSubcommandOptions *CreateSubcommandOptions } -// RoleBinding is a command to ease creating RoleBindings. +// NewCmdCreateRoleBinding returns an initialized Command instance for 'create rolebinding' sub command func NewCmdCreateRoleBinding(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { - options := &RoleBindingOpts{ + o := &RoleBindingOpts{ CreateSubcommandOptions: NewCreateSubcommandOptions(ioStreams), } @@ -53,12 +54,12 @@ func NewCmdCreateRoleBinding(f cmdutil.Factory, ioStreams genericclioptions.IOSt Long: roleBindingLong, Example: roleBindingExample, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(options.Complete(f, cmd, args)) - cmdutil.CheckErr(options.Run()) + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Run()) }, } - options.CreateSubcommandOptions.PrintFlags.AddFlags(cmd) + o.CreateSubcommandOptions.PrintFlags.AddFlags(cmd) cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) @@ -71,6 +72,7 @@ func NewCmdCreateRoleBinding(f cmdutil.Factory, ioStreams genericclioptions.IOSt return cmd } +// Complete completes all the required options func (o *RoleBindingOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -95,6 +97,7 @@ func (o *RoleBindingOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } +// Run calls the CreateSubcommandOptions.Run in RoleBindingOpts instance func (o *RoleBindingOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git a/pkg/kubectl/cmd/create/create_secret.go b/pkg/kubectl/cmd/create/create_secret.go index 4512dffbda8..8ba703c2830 100644 --- a/pkg/kubectl/cmd/create/create_secret.go +++ b/pkg/kubectl/cmd/create/create_secret.go @@ -73,6 +73,7 @@ var ( kubectl create secret generic my-secret --from-env-file=path/to/bar.env`)) ) +// SecretGenericOpts holds the options for 
'create secret' sub command type SecretGenericOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -108,6 +109,7 @@ func NewCmdCreateSecretGeneric(f cmdutil.Factory, ioStreams genericclioptions.IO return cmd } +// Complete completes all the required options func (o *SecretGenericOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -132,7 +134,7 @@ func (o *SecretGenericOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateSecretGeneric is the implementation of the create secret generic command +// Run calls the CreateSubcommandOptions.Run in SecretGenericOpts instance func (o *SecretGenericOpts) Run() error { return o.CreateSubcommandOptions.Run() } @@ -158,6 +160,7 @@ var ( kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL`)) ) +// SecretDockerRegistryOpts holds the options for 'create secret docker-registry' sub command type SecretDockerRegistryOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -197,6 +200,7 @@ func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, ioStreams genericcliopt return cmd } +// Complete completes all the required options func (o *SecretDockerRegistryOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -232,7 +236,7 @@ func (o *SecretDockerRegistryOpts) Complete(f cmdutil.Factory, cmd *cobra.Comman return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateSecretDockerRegistry is the implementation of the create secret docker-registry command +// Run calls CreateSubcommandOptions.Run in SecretDockerRegistryOpts instance func (o *SecretDockerRegistryOpts) Run() error { return 
o.CreateSubcommandOptions.Run() } @@ -249,6 +253,7 @@ var ( kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key`)) ) +// SecretTLSOpts holds the options for 'create secret tls' sub command type SecretTLSOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -282,6 +287,7 @@ func NewCmdCreateSecretTLS(f cmdutil.Factory, ioStreams genericclioptions.IOStre return cmd } +// Complete completes all the required options func (o *SecretTLSOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -310,7 +316,7 @@ func (o *SecretTLSOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateSecretTLS is the implementation of the create secret tls command +// Run calls CreateSubcommandOptions.Run in the SecretTLSOpts instance func (o *SecretTLSOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git a/pkg/kubectl/cmd/create/create_service.go b/pkg/kubectl/cmd/create/create_service.go index ee34f1814f7..f36fca9b482 100644 --- a/pkg/kubectl/cmd/create/create_service.go +++ b/pkg/kubectl/cmd/create/create_service.go @@ -61,6 +61,7 @@ func addPortFlags(cmd *cobra.Command) { cmd.Flags().StringSlice("tcp", []string{}, "Port pairs can be specified as ':'.") } +// ServiceClusterIPOpts holds the options for 'create service clusterip' sub command type ServiceClusterIPOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -97,6 +98,7 @@ func errUnsupportedGenerator(cmd *cobra.Command, generatorName string) error { return cmdutil.UsageErrorf(cmd, "Generator %s not supported. 
", generatorName) } +// Complete completes all the required options func (o *ServiceClusterIPOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -119,7 +121,7 @@ func (o *ServiceClusterIPOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, a return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateServiceClusterIP is the implementation of the create service clusterip command +// Run calls the CreateSubcommandOptions.Run in ServiceClusterIPOpts instance func (o *ServiceClusterIPOpts) Run() error { return o.CreateSubcommandOptions.Run() } @@ -133,6 +135,7 @@ var ( kubectl create service nodeport my-ns --tcp=5678:8080`)) ) +// ServiceNodePortOpts holds the options for 'create service nodeport' sub command type ServiceNodePortOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -165,6 +168,7 @@ func NewCmdCreateServiceNodePort(f cmdutil.Factory, ioStreams genericclioptions. 
return cmd } +// Complete completes all the required options func (o *ServiceNodePortOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -188,7 +192,7 @@ func (o *ServiceNodePortOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, ar return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateServiceNodePort is the implementation of the create service nodeport command +// Run calls the CreateSubcommandOptions.Run in ServiceNodePortOpts instance func (o *ServiceNodePortOpts) Run() error { return o.CreateSubcommandOptions.Run() } @@ -202,6 +206,7 @@ var ( kubectl create service loadbalancer my-lbs --tcp=5678:8080`)) ) +// ServiceLoadBalancerOpts holds the options for 'create service loadbalancer' sub command type ServiceLoadBalancerOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -233,6 +238,7 @@ func NewCmdCreateServiceLoadBalancer(f cmdutil.Factory, ioStreams genericcliopti return cmd } +// Complete completes all the required options func (o *ServiceLoadBalancerOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -255,7 +261,7 @@ func (o *ServiceLoadBalancerOpts) Complete(f cmdutil.Factory, cmd *cobra.Command return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateServiceLoadBalancer is the implementation of the create service loadbalancer command +// Run calls the CreateSubcommandOptions.Run in ServiceLoadBalancerOpts instance func (o *ServiceLoadBalancerOpts) Run() error { return o.CreateSubcommandOptions.Run() } @@ -273,6 +279,7 @@ var ( kubectl create service externalname my-ns --external-name bar.com`)) ) +// ServiceExternalNameOpts holds the options for 'create service externalname' sub command type ServiceExternalNameOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -306,6 +313,7 @@ func 
NewCmdCreateServiceExternalName(f cmdutil.Factory, ioStreams genericcliopti return cmd } +// Complete completes all the required options func (o *ServiceExternalNameOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -328,7 +336,7 @@ func (o *ServiceExternalNameOpts) Complete(f cmdutil.Factory, cmd *cobra.Command return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateExternalNameService is the implementation of the create service externalname command +// Run calls the CreateSubcommandOptions.Run in ServiceExternalNameOpts instance func (o *ServiceExternalNameOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git a/pkg/kubectl/cmd/create/create_serviceaccount.go b/pkg/kubectl/cmd/create/create_serviceaccount.go index 69090356106..bbf9eb98fb7 100644 --- a/pkg/kubectl/cmd/create/create_serviceaccount.go +++ b/pkg/kubectl/cmd/create/create_serviceaccount.go @@ -36,6 +36,7 @@ var ( kubectl create serviceaccount my-service-account`)) ) +// ServiceAccountOpts holds the options for 'create serviceaccount' sub command type ServiceAccountOpts struct { CreateSubcommandOptions *CreateSubcommandOptions } @@ -67,6 +68,7 @@ func NewCmdCreateServiceAccount(f cmdutil.Factory, ioStreams genericclioptions.I return cmd } +// Complete completes all the required options func (o *ServiceAccountOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -84,7 +86,7 @@ func (o *ServiceAccountOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, arg return o.CreateSubcommandOptions.Complete(f, cmd, args, generator) } -// CreateServiceAccount implements the behavior to run the create service account command +// Run calls the CreateSubcommandOptions.Run in ServiceAccountOpts instance func (o *ServiceAccountOpts) Run() error { return o.CreateSubcommandOptions.Run() } diff --git 
a/pkg/kubectl/cmd/delete/delete.go b/pkg/kubectl/cmd/delete/delete.go index 84b6db50bfe..dcb858f8105 100644 --- a/pkg/kubectl/cmd/delete/delete.go +++ b/pkg/kubectl/cmd/delete/delete.go @@ -33,13 +33,13 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/dynamic" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - kubectlwait "k8s.io/kubernetes/pkg/kubectl/cmd/wait" + cmdwait "k8s.io/kubernetes/pkg/kubectl/cmd/wait" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( - delete_long = templates.LongDesc(i18n.T(` + deleteLong = templates.LongDesc(i18n.T(` Delete resources by filenames, stdin, resources and names, or by resources and label selector. JSON and YAML formats are accepted. Only one type of the arguments may be specified: filenames, @@ -67,7 +67,7 @@ var ( update to a resource right when you submit a delete, their update will be lost along with the rest of the resource.`)) - delete_example = templates.Examples(i18n.T(` + deleteExample = templates.Examples(i18n.T(` # Delete a pod using the type and name specified in pod.json. kubectl delete -f ./pod.json @@ -121,8 +121,8 @@ func NewCmdDelete(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra Use: "delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)])", DisableFlagsInUseLine: true, Short: i18n.T("Delete resources by filenames, stdin, resources and names, or by resources and label selector"), - Long: delete_long, - Example: delete_example, + Long: deleteLong, + Example: deleteExample, Run: func(cmd *cobra.Command, args []string) { o := deleteFlags.ToOptions(nil, streams) cmdutil.CheckErr(o.Complete(f, args, cmd)) @@ -196,7 +196,7 @@ func (o *DeleteOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Co func (o *DeleteOptions) Validate() error { if o.Output != "" && o.Output != "name" { - return fmt.Errorf("unexpected -o output mode: %v. 
We only support '-o name'.", o.Output) + return fmt.Errorf("unexpected -o output mode: %v. We only support '-o name'", o.Output) } if o.DeleteAll && len(o.LabelSelector) > 0 { @@ -225,7 +225,7 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { r = r.IgnoreErrors(errors.IsNotFound) } deletedInfos := []*resource.Info{} - uidMap := kubectlwait.UIDMap{} + uidMap := cmdwait.UIDMap{} err := r.Visit(func(info *resource.Info, err error) error { if err != nil { return err @@ -247,7 +247,7 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { if err != nil { return err } - resourceLocation := kubectlwait.ResourceLocation{ + resourceLocation := cmdwait.ResourceLocation{ GroupResource: info.Mapping.Resource.GroupResource(), Namespace: info.Namespace, Name: info.Name, @@ -287,14 +287,14 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { // if we requested to wait forever, set it to a week. effectiveTimeout = 168 * time.Hour } - waitOptions := kubectlwait.WaitOptions{ + waitOptions := cmdwait.WaitOptions{ ResourceFinder: genericclioptions.ResourceFinderForResult(resource.InfoListVisitor(deletedInfos)), UIDMap: uidMap, DynamicClient: o.DynamicClient, Timeout: effectiveTimeout, Printer: printers.NewDiscardingPrinter(), - ConditionFn: kubectlwait.IsDeleted, + ConditionFn: cmdwait.IsDeleted, IOStreams: o.IOStreams, } err = waitOptions.RunWait() @@ -317,7 +317,7 @@ func (o *DeleteOptions) deleteResource(info *resource.Info, deleteOptions *metav return deleteResponse, nil } -// deletion printing is special because we do not have an object to print. +// PrintObj for deleted objects is special because we do not have an object to print. 
// This mirrors name printer behavior func (o *DeleteOptions) PrintObj(info *resource.Info) { operation := "deleted" diff --git a/pkg/kubectl/cmd/delete/delete_flags.go b/pkg/kubectl/cmd/delete/delete_flags.go index f6dcc74d95f..22751c697d0 100644 --- a/pkg/kubectl/cmd/delete/delete_flags.go +++ b/pkg/kubectl/cmd/delete/delete_flags.go @@ -25,7 +25,7 @@ import ( "k8s.io/client-go/dynamic" ) -// PrintFlags composes common printer flag structs +// DeleteFlags composes common printer flag structs // used for commands requiring deletion logic. type DeleteFlags struct { FileNameFlags *genericclioptions.FileNameFlags diff --git a/pkg/kubectl/cmd/describe/describe.go b/pkg/kubectl/cmd/describe/describe.go index c1c84cd1717..d7ec6bca7ab 100644 --- a/pkg/kubectl/cmd/describe/describe.go +++ b/pkg/kubectl/cmd/describe/describe.go @@ -105,7 +105,7 @@ func NewCmdDescribe(parent string, f cmdutil.Factory, streams genericclioptions. Use: "describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME)", DisableFlagsInUseLine: true, Short: i18n.T("Show details of a specific resource or group of resources"), - Long: describeLong + "\n\n" + cmdutil.SuggestApiResources(parent), + Long: describeLong + "\n\n" + cmdutil.SuggestAPIResources(parent), Example: describeExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) @@ -133,7 +133,7 @@ func (o *DescribeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ } if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames) { - return fmt.Errorf("You must specify the type of resource to describe. %s\n", cmdutil.SuggestApiResources(o.CmdParent)) + return fmt.Errorf("You must specify the type of resource to describe. 
%s\n", cmdutil.SuggestAPIResources(o.CmdParent)) } o.BuilderArgs = args diff --git a/pkg/kubectl/cmd/diff/BUILD b/pkg/kubectl/cmd/diff/BUILD index 4a99f161251..93e0fb64ebd 100644 --- a/pkg/kubectl/cmd/diff/BUILD +++ b/pkg/kubectl/cmd/diff/BUILD @@ -14,6 +14,7 @@ go_library( "//pkg/kubectl/util/i18n:go_default_library", "//pkg/kubectl/util/templates:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -21,6 +22,7 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//vendor/github.com/jonboulle/clockwork:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], diff --git a/pkg/kubectl/cmd/diff/diff.go b/pkg/kubectl/cmd/diff/diff.go index 9c52dbfb6b4..1956cf1bb0f 100644 --- a/pkg/kubectl/cmd/diff/diff.go +++ b/pkg/kubectl/cmd/diff/diff.go @@ -26,11 +26,13 @@ import ( "github.com/jonboulle/clockwork" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/apply" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -60,6 +62,9 @@ var ( cat service.yaml | kubectl diff -f -`)) ) +// Number of times we try to diff before giving-up +const maxRetries = 4 + type DiffOptions struct { 
FilenameOptions resource.FilenameOptions } @@ -228,6 +233,7 @@ type InfoObject struct { Info *resource.Info Encoder runtime.Encoder OpenAPI openapi.Resources + Force bool } var _ Object = &InfoObject{} @@ -251,6 +257,16 @@ func (obj InfoObject) Merged() (runtime.Object, error) { ) } + var resourceVersion *string + if !obj.Force { + accessor, err := meta.Accessor(obj.Info.Object) + if err != nil { + return nil, err + } + str := accessor.GetResourceVersion() + resourceVersion = &str + } + modified, err := kubectl.GetModifiedConfiguration(obj.LocalObj, false, unstructured.UnstructuredJSONScheme) if err != nil { return nil, err @@ -259,12 +275,13 @@ func (obj InfoObject) Merged() (runtime.Object, error) { // This is using the patcher from apply, to keep the same behavior. // We plan on replacing this with server-side apply when it becomes available. patcher := &apply.Patcher{ - Mapping: obj.Info.Mapping, - Helper: resource.NewHelper(obj.Info.Client, obj.Info.Mapping), - Overwrite: true, - BackOff: clockwork.NewRealClock(), - ServerDryRun: true, - OpenapiSchema: obj.OpenAPI, + Mapping: obj.Info.Mapping, + Helper: resource.NewHelper(obj.Info.Client, obj.Info.Mapping), + Overwrite: true, + BackOff: clockwork.NewRealClock(), + ServerDryRun: true, + OpenapiSchema: obj.OpenAPI, + ResourceVersion: resourceVersion, } _, result, err := patcher.Patch(obj.Info.Object, modified, obj.Info.Source, obj.Info.Namespace, obj.Info.Name, nil) @@ -272,7 +289,17 @@ func (obj InfoObject) Merged() (runtime.Object, error) { } func (obj InfoObject) Name() string { - return obj.Info.Name + group := "" + if obj.Info.Mapping.GroupVersionKind.Group != "" { + group = fmt.Sprintf("%v.", obj.Info.Mapping.GroupVersionKind.Group) + } + return group + fmt.Sprintf( + "%v.%v.%v.%v", + obj.Info.Mapping.GroupVersionKind.Version, + obj.Info.Mapping.GroupVersionKind.Kind, + obj.Info.Namespace, + obj.Info.Name, + ) } // Differ creates two DiffVersion and diffs them. 
@@ -319,6 +346,10 @@ func (d *Differ) TearDown() { d.To.Dir.Delete() // Ignore error } +func isConflict(err error) bool { + return err != nil && errors.IsConflict(err) +} + // RunDiff uses the factory to parse file arguments, find the version to // diff, and find each Info object for each files, and runs against the // differ. @@ -376,21 +407,36 @@ func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions) error { } local := info.Object.DeepCopyObject() - if err := info.Get(); err != nil { - if !errors.IsNotFound(err) { - return err + for i := 1; i <= maxRetries; i++ { + if err = info.Get(); err != nil { + if !errors.IsNotFound(err) { + return err + } + info.Object = nil } - info.Object = nil - } - obj := InfoObject{ - LocalObj: local, - Info: info, - Encoder: scheme.DefaultJSONEncoder(), - OpenAPI: schema, - } + force := i == maxRetries + if force { + klog.Warningf( + "Object (%v: %v) keeps changing, diffing without lock", + info.Object.GetObjectKind().GroupVersionKind(), + info.Name, + ) + } + obj := InfoObject{ + LocalObj: local, + Info: info, + Encoder: scheme.DefaultJSONEncoder(), + OpenAPI: schema, + Force: force, + } - return differ.Diff(obj, printer) + err = differ.Diff(obj, printer) + if !isConflict(err) { + break + } + } + return err }) if err != nil { return err diff --git a/pkg/kubectl/cmd/drain/drain.go b/pkg/kubectl/cmd/drain/drain.go index d24ab8ac6bb..4f4d2d88b00 100644 --- a/pkg/kubectl/cmd/drain/drain.go +++ b/pkg/kubectl/cmd/drain/drain.go @@ -84,79 +84,72 @@ const ( EvictionKind = "Eviction" EvictionSubresource = "pods/eviction" - kDaemonsetFatal = "DaemonSet-managed pods (use --ignore-daemonsets to ignore)" - kDaemonsetWarning = "Ignoring DaemonSet-managed pods" - kLocalStorageFatal = "pods with local storage (use --delete-local-data to override)" - kLocalStorageWarning = "Deleting pods with local storage" - kUnmanagedFatal = "pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet (use --force to 
override)" - kUnmanagedWarning = "Deleting pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet" + daemonsetFatal = "DaemonSet-managed Pods (use --ignore-daemonsets to ignore)" + daemonsetWarning = "ignoring DaemonSet-managed Pods" + localStorageFatal = "Pods with local storage (use --delete-local-data to override)" + localStorageWarning = "deleting Pods with local storage" + unmanagedFatal = "Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet (use --force to override)" + unmanagedWarning = "deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet" ) var ( - cordon_long = templates.LongDesc(i18n.T(` + cordonLong = templates.LongDesc(i18n.T(` Mark node as unschedulable.`)) - cordon_example = templates.Examples(i18n.T(` + cordonExample = templates.Examples(i18n.T(` # Mark node "foo" as unschedulable. kubectl cordon foo`)) ) func NewCmdCordon(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { - options := &DrainOptions{ - PrintFlags: genericclioptions.NewPrintFlags("cordoned").WithTypeSetter(scheme.Scheme), - - IOStreams: ioStreams, - } + o := NewDrainOptions(f, ioStreams) cmd := &cobra.Command{ Use: "cordon NODE", DisableFlagsInUseLine: true, Short: i18n.T("Mark node as unschedulable"), - Long: cordon_long, - Example: cordon_example, + Long: cordonLong, + Example: cordonExample, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(options.Complete(f, cmd, args)) - cmdutil.CheckErr(options.RunCordonOrUncordon(true)) + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.RunCordonOrUncordon(true)) }, } - cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on") + cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on") cmdutil.AddDryRunFlag(cmd) return cmd } var ( - uncordon_long = templates.LongDesc(i18n.T(` + 
uncordonLong = templates.LongDesc(i18n.T(` Mark node as schedulable.`)) - uncordon_example = templates.Examples(i18n.T(` + uncordonExample = templates.Examples(i18n.T(` # Mark node "foo" as schedulable. $ kubectl uncordon foo`)) ) func NewCmdUncordon(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { - options := &DrainOptions{ - PrintFlags: genericclioptions.NewPrintFlags("uncordoned").WithTypeSetter(scheme.Scheme), - IOStreams: ioStreams, - } + o := NewDrainOptions(f, ioStreams) cmd := &cobra.Command{ Use: "uncordon NODE", DisableFlagsInUseLine: true, Short: i18n.T("Mark node as schedulable"), - Long: uncordon_long, - Example: uncordon_example, + Long: uncordonLong, + Example: uncordonExample, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(options.Complete(f, cmd, args)) - cmdutil.CheckErr(options.RunCordonOrUncordon(false)) + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.RunCordonOrUncordon(false)) }, } - cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on") + cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on") cmdutil.AddDryRunFlag(cmd) return cmd } var ( - drain_long = templates.LongDesc(i18n.T(` + drainLong = templates.LongDesc(i18n.T(` Drain node in preparation for maintenance. The given node will be marked unschedulable to prevent new pods from arriving. @@ -181,7 +174,7 @@ var ( ![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)`)) - drain_example = templates.Examples(i18n.T(` + drainExample = templates.Examples(i18n.T(` # Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it. 
$ kubectl drain foo --force @@ -199,26 +192,26 @@ func NewDrainOptions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) * } func NewCmdDrain(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { - options := NewDrainOptions(f, ioStreams) + o := NewDrainOptions(f, ioStreams) cmd := &cobra.Command{ Use: "drain NODE", DisableFlagsInUseLine: true, Short: i18n.T("Drain node in preparation for maintenance"), - Long: drain_long, - Example: drain_example, + Long: drainLong, + Example: drainExample, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(options.Complete(f, cmd, args)) - cmdutil.CheckErr(options.RunDrain()) + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.RunDrain()) }, } - cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet.") - cmd.Flags().BoolVar(&options.IgnoreDaemonsets, "ignore-daemonsets", options.IgnoreDaemonsets, "Ignore DaemonSet-managed pods.") - cmd.Flags().BoolVar(&options.DeleteLocalData, "delete-local-data", options.DeleteLocalData, "Continue even if there are pods using emptyDir (local data that will be deleted when the node is drained).") - cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", options.GracePeriodSeconds, "Period of time in seconds given to each pod to terminate gracefully. 
If negative, the default value specified in the pod will be used.") - cmd.Flags().DurationVar(&options.Timeout, "timeout", options.Timeout, "The length of time to wait before giving up, zero means infinite") - cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on") - cmd.Flags().StringVarP(&options.PodSelector, "pod-selector", "", options.PodSelector, "Label selector to filter pods on the node") + cmd.Flags().BoolVar(&o.Force, "force", o.Force, "Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet.") + cmd.Flags().BoolVar(&o.IgnoreDaemonsets, "ignore-daemonsets", o.IgnoreDaemonsets, "Ignore DaemonSet-managed pods.") + cmd.Flags().BoolVar(&o.DeleteLocalData, "delete-local-data", o.DeleteLocalData, "Continue even if there are pods using emptyDir (local data that will be deleted when the node is drained).") + cmd.Flags().IntVar(&o.GracePeriodSeconds, "grace-period", o.GracePeriodSeconds, "Period of time in seconds given to each pod to terminate gracefully. 
If negative, the default value specified in the pod will be used.") + cmd.Flags().DurationVar(&o.Timeout, "timeout", o.Timeout, "The length of time to wait before giving up, zero means infinite") + cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on") + cmd.Flags().StringVarP(&o.PodSelector, "pod-selector", "", o.PodSelector, "Label selector to filter pods on the node") cmdutil.AddDryRunFlag(cmd) return cmd @@ -386,10 +379,10 @@ func (o *DrainOptions) unreplicatedFilter(pod corev1.Pod) (bool, *warning, *fata return true, nil, nil } if o.Force { - return true, &warning{kUnmanagedWarning}, nil + return true, &warning{unmanagedWarning}, nil } - return false, nil, &fatal{kUnmanagedFatal} + return false, nil, &fatal{unmanagedFatal} } func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal) { @@ -418,10 +411,10 @@ func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal) } if !o.IgnoreDaemonsets { - return false, nil, &fatal{kDaemonsetFatal} + return false, nil, &fatal{daemonsetFatal} } - return false, &warning{kDaemonsetWarning}, nil + return false, &warning{daemonsetWarning}, nil } func mirrorPodFilter(pod corev1.Pod) (bool, *warning, *fatal) { @@ -450,10 +443,10 @@ func (o *DrainOptions) localStorageFilter(pod corev1.Pod) (bool, *warning, *fata return true, nil, nil } if !o.DeleteLocalData { - return false, nil, &fatal{kLocalStorageFatal} + return false, nil, &fatal{localStorageFatal} } - return true, &warning{kLocalStorageWarning}, nil + return true, &warning{localStorageWarning}, nil } // Map of status message to a list of pod names having that status. 
diff --git a/pkg/kubectl/cmd/drain/drain_test.go b/pkg/kubectl/cmd/drain/drain_test.go index 0d4d2bbf1a5..d0a3a4e17e8 100644 --- a/pkg/kubectl/cmd/drain/drain_test.go +++ b/pkg/kubectl/cmd/drain/drain_test.go @@ -57,7 +57,7 @@ const ( ) var node *corev1.Node -var cordoned_node *corev1.Node +var cordonedNode *corev1.Node func boolptr(b bool) *bool { return &b } @@ -72,8 +72,8 @@ func TestMain(m *testing.M) { } // A copy of the same node, but cordoned. - cordoned_node = node.DeepCopy() - cordoned_node.Spec.Unschedulable = true + cordonedNode = node.DeepCopy() + cordonedNode.Spec.Unschedulable = true os.Exit(m.Run()) } @@ -88,7 +88,7 @@ func TestCordon(t *testing.T) { }{ { description: "node/node syntax", - node: cordoned_node, + node: cordonedNode, expected: node, cmd: NewCmdUncordon, arg: "node/node", @@ -96,7 +96,7 @@ func TestCordon(t *testing.T) { }, { description: "uncordon for real", - node: cordoned_node, + node: cordonedNode, expected: node, cmd: NewCmdUncordon, arg: "node", @@ -112,8 +112,8 @@ func TestCordon(t *testing.T) { }, { description: "cordon does nothing", - node: cordoned_node, - expected: cordoned_node, + node: cordonedNode, + expected: cordonedNode, cmd: NewCmdCordon, arg: "node", expectFatal: false, @@ -121,7 +121,7 @@ func TestCordon(t *testing.T) { { description: "cordon for real", node: node, - expected: cordoned_node, + expected: cordonedNode, cmd: NewCmdCordon, arg: "node", expectFatal: false, @@ -145,14 +145,14 @@ func TestCordon(t *testing.T) { { description: "cordon for multiple nodes", node: node, - expected: cordoned_node, + expected: cordonedNode, cmd: NewCmdCordon, arg: "node node1 node2", expectFatal: false, }, { description: "uncordon for multiple nodes", - node: cordoned_node, + node: cordonedNode, expected: node, cmd: NewCmdUncordon, arg: "node node1 node2", @@ -168,7 +168,7 @@ func TestCordon(t *testing.T) { codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
ns := scheme.Codecs - new_node := &corev1.Node{} + newNode := &corev1.Node{} updated := false tf.Client = &fake.RESTClient{ GroupVersion: schema.GroupVersion{Group: "", Version: "v1"}, @@ -202,14 +202,14 @@ func TestCordon(t *testing.T) { if err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } - if err := runtime.DecodeInto(codec, appliedPatch, new_node); err != nil { + if err := runtime.DecodeInto(codec, appliedPatch, newNode); err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } - if !reflect.DeepEqual(test.expected.Spec, new_node.Spec) { - t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, test.expected.Spec.Unschedulable, new_node.Spec.Unschedulable) + if !reflect.DeepEqual(test.expected.Spec, newNode.Spec) { + t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, test.expected.Spec.Unschedulable, newNode.Spec.Unschedulable) } updated = true - return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, new_node)}, nil + return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, newNode)}, nil default: t.Fatalf("%s: unexpected request: %v %#v\n%#v", test.description, req.Method, req.URL, req) return nil, nil @@ -221,7 +221,7 @@ func TestCordon(t *testing.T) { ioStreams, _, _, _ := genericclioptions.NewTestIOStreams() cmd := test.cmd(tf, ioStreams) - saw_fatal := false + sawFatal := false func() { defer func() { // Recover from the panic below. 
@@ -230,7 +230,7 @@ func TestCordon(t *testing.T) { cmdutil.DefaultBehaviorOnFatal() }() cmdutil.BehaviorOnFatal(func(e string, code int) { - saw_fatal = true + sawFatal = true panic(e) }) cmd.SetArgs(strings.Split(test.arg, " ")) @@ -238,7 +238,7 @@ func TestCordon(t *testing.T) { }() if test.expectFatal { - if !saw_fatal { + if !sawFatal { t.Fatalf("%s: unexpected non-error", test.description) } if updated { @@ -246,7 +246,7 @@ func TestCordon(t *testing.T) { } } - if !test.expectFatal && saw_fatal { + if !test.expectFatal && sawFatal { t.Fatalf("%s: unexpected error", test.description) } if !reflect.DeepEqual(test.expected.Spec, test.node.Spec) && !updated { @@ -272,7 +272,7 @@ func TestDrain(t *testing.T) { }, } - rc_pod := corev1.Pod{ + rcPod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -305,7 +305,7 @@ func TestDrain(t *testing.T) { }, } - ds_pod := corev1.Pod{ + dsPod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -326,7 +326,7 @@ func TestDrain(t *testing.T) { }, } - ds_terminated_pod := corev1.Pod{ + dsTerminatedPod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -350,7 +350,7 @@ func TestDrain(t *testing.T) { }, } - ds_pod_with_emptyDir := corev1.Pod{ + dsPodWithEmptyDir := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -377,7 +377,7 @@ func TestDrain(t *testing.T) { }, } - orphaned_ds_pod := corev1.Pod{ + orphanedDsPod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -400,7 +400,7 @@ func TestDrain(t *testing.T) { }, } - job_pod := corev1.Pod{ + jobPod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -427,7 +427,7 @@ func TestDrain(t *testing.T) { }, } - terminated_job_pod_with_local_storage := corev1.Pod{ + terminatedJobPodWithLocalStorage := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -469,7 +469,7 @@ 
func TestDrain(t *testing.T) { }, } - rs_pod := corev1.Pod{ + rsPod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -490,7 +490,7 @@ func TestDrain(t *testing.T) { }, } - naked_pod := corev1.Pod{ + nakedPod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -502,7 +502,7 @@ func TestDrain(t *testing.T) { }, } - emptydir_pod := corev1.Pod{ + emptydirPod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -519,7 +519,7 @@ func TestDrain(t *testing.T) { }, }, } - emptydir_terminated_pod := corev1.Pod{ + emptydirTerminatedPod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "default", @@ -555,8 +555,8 @@ func TestDrain(t *testing.T) { { description: "RC-managed pod", node: node, - expected: cordoned_node, - pods: []corev1.Pod{rc_pod}, + expected: cordonedNode, + pods: []corev1.Pod{rcPod}, rcs: []corev1.ReplicationController{rc}, args: []string{"node"}, expectFatal: false, @@ -565,8 +565,8 @@ func TestDrain(t *testing.T) { { description: "DS-managed pod", node: node, - expected: cordoned_node, - pods: []corev1.Pod{ds_pod}, + expected: cordonedNode, + pods: []corev1.Pod{dsPod}, rcs: []corev1.ReplicationController{rc}, args: []string{"node"}, expectFatal: true, @@ -575,8 +575,8 @@ func TestDrain(t *testing.T) { { description: "DS-managed terminated pod", node: node, - expected: cordoned_node, - pods: []corev1.Pod{ds_terminated_pod}, + expected: cordonedNode, + pods: []corev1.Pod{dsTerminatedPod}, rcs: []corev1.ReplicationController{rc}, args: []string{"node"}, expectFatal: false, @@ -585,8 +585,8 @@ func TestDrain(t *testing.T) { { description: "orphaned DS-managed pod", node: node, - expected: cordoned_node, - pods: []corev1.Pod{orphaned_ds_pod}, + expected: cordonedNode, + pods: []corev1.Pod{orphanedDsPod}, rcs: []corev1.ReplicationController{}, args: []string{"node"}, expectFatal: true, @@ -595,8 +595,8 @@ func TestDrain(t *testing.T) { { 
description: "orphaned DS-managed pod with --force", node: node, - expected: cordoned_node, - pods: []corev1.Pod{orphaned_ds_pod}, + expected: cordonedNode, + pods: []corev1.Pod{orphanedDsPod}, rcs: []corev1.ReplicationController{}, args: []string{"node", "--force"}, expectFatal: false, @@ -605,8 +605,8 @@ func TestDrain(t *testing.T) { { description: "DS-managed pod with --ignore-daemonsets", node: node, - expected: cordoned_node, - pods: []corev1.Pod{ds_pod}, + expected: cordonedNode, + pods: []corev1.Pod{dsPod}, rcs: []corev1.ReplicationController{rc}, args: []string{"node", "--ignore-daemonsets"}, expectFatal: false, @@ -615,19 +615,19 @@ func TestDrain(t *testing.T) { { description: "DS-managed pod with emptyDir with --ignore-daemonsets", node: node, - expected: cordoned_node, - pods: []corev1.Pod{ds_pod_with_emptyDir}, + expected: cordonedNode, + pods: []corev1.Pod{dsPodWithEmptyDir}, rcs: []corev1.ReplicationController{rc}, args: []string{"node", "--ignore-daemonsets"}, - expectWarning: "WARNING: Ignoring DaemonSet-managed pods: bar", + expectWarning: "WARNING: ignoring DaemonSet-managed Pods: bar", expectFatal: false, expectDelete: false, }, { description: "Job-managed pod with local storage", node: node, - expected: cordoned_node, - pods: []corev1.Pod{job_pod}, + expected: cordonedNode, + pods: []corev1.Pod{jobPod}, rcs: []corev1.ReplicationController{rc}, args: []string{"node", "--force", "--delete-local-data=true"}, expectFatal: false, @@ -636,8 +636,8 @@ func TestDrain(t *testing.T) { { description: "Job-managed terminated pod", node: node, - expected: cordoned_node, - pods: []corev1.Pod{terminated_job_pod_with_local_storage}, + expected: cordonedNode, + pods: []corev1.Pod{terminatedJobPodWithLocalStorage}, rcs: []corev1.ReplicationController{rc}, args: []string{"node"}, expectFatal: false, @@ -646,8 +646,8 @@ func TestDrain(t *testing.T) { { description: "RS-managed pod", node: node, - expected: cordoned_node, - pods: []corev1.Pod{rs_pod}, + expected: 
cordonedNode, + pods: []corev1.Pod{rsPod}, replicaSets: []extensionsv1beta1.ReplicaSet{rs}, args: []string{"node"}, expectFatal: false, @@ -656,8 +656,8 @@ func TestDrain(t *testing.T) { { description: "naked pod", node: node, - expected: cordoned_node, - pods: []corev1.Pod{naked_pod}, + expected: cordonedNode, + pods: []corev1.Pod{nakedPod}, rcs: []corev1.ReplicationController{}, args: []string{"node"}, expectFatal: true, @@ -666,8 +666,8 @@ func TestDrain(t *testing.T) { { description: "naked pod with --force", node: node, - expected: cordoned_node, - pods: []corev1.Pod{naked_pod}, + expected: cordonedNode, + pods: []corev1.Pod{nakedPod}, rcs: []corev1.ReplicationController{}, args: []string{"node", "--force"}, expectFatal: false, @@ -676,8 +676,8 @@ func TestDrain(t *testing.T) { { description: "pod with EmptyDir", node: node, - expected: cordoned_node, - pods: []corev1.Pod{emptydir_pod}, + expected: cordonedNode, + pods: []corev1.Pod{emptydirPod}, args: []string{"node", "--force"}, expectFatal: true, expectDelete: false, @@ -685,8 +685,8 @@ func TestDrain(t *testing.T) { { description: "terminated pod with emptyDir", node: node, - expected: cordoned_node, - pods: []corev1.Pod{emptydir_terminated_pod}, + expected: cordonedNode, + pods: []corev1.Pod{emptydirTerminatedPod}, rcs: []corev1.ReplicationController{rc}, args: []string{"node"}, expectFatal: false, @@ -695,8 +695,8 @@ func TestDrain(t *testing.T) { { description: "pod with EmptyDir and --delete-local-data", node: node, - expected: cordoned_node, - pods: []corev1.Pod{emptydir_pod}, + expected: cordonedNode, + pods: []corev1.Pod{emptydirPod}, args: []string{"node", "--force", "--delete-local-data=true"}, expectFatal: false, expectDelete: true, @@ -704,7 +704,7 @@ func TestDrain(t *testing.T) { { description: "empty node", node: node, - expected: cordoned_node, + expected: cordonedNode, pods: []corev1.Pod{}, rcs: []corev1.ReplicationController{rc}, args: []string{"node"}, @@ -724,7 +724,7 @@ func TestDrain(t 
*testing.T) { } for _, test := range tests { t.Run(test.description, func(t *testing.T) { - new_node := &corev1.Node{} + newNode := &corev1.Node{} deleted := false evicted := false tf := cmdtesting.NewTestFactory() @@ -788,10 +788,10 @@ func TestDrain(t *testing.T) { if err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } - get_params := make(url.Values) - get_params["fieldSelector"] = []string{"spec.nodeName=node"} - if !reflect.DeepEqual(get_params, values) { - t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, get_params, values) + getParams := make(url.Values) + getParams["fieldSelector"] = []string{"spec.nodeName=node"} + if !reflect.DeepEqual(getParams, values) { + t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, getParams, values) } return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, &corev1.PodList{Items: test.pods})}, nil case m.isFor("GET", "/replicationcontrollers"): @@ -810,13 +810,13 @@ func TestDrain(t *testing.T) { if err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } - if err := runtime.DecodeInto(codec, appliedPatch, new_node); err != nil { + if err := runtime.DecodeInto(codec, appliedPatch, newNode); err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } - if !reflect.DeepEqual(test.expected.Spec, new_node.Spec) { - t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, test.expected.Spec, new_node.Spec) + if !reflect.DeepEqual(test.expected.Spec, newNode.Spec) { + t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, test.expected.Spec, newNode.Spec) } - return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, new_node)}, nil + return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, newNode)}, nil case m.isFor("DELETE", "/namespaces/default/pods/bar"): deleted = true return 
&http.Response{StatusCode: 204, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, &test.pods[0])}, nil @@ -834,8 +834,8 @@ func TestDrain(t *testing.T) { ioStreams, _, _, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdDrain(tf, ioStreams) - saw_fatal := false - fatal_msg := "" + sawFatal := false + fatalMsg := "" func() { defer func() { // Recover from the panic below. @@ -843,17 +843,17 @@ func TestDrain(t *testing.T) { // Restore cmdutil behavior cmdutil.DefaultBehaviorOnFatal() }() - cmdutil.BehaviorOnFatal(func(e string, code int) { saw_fatal = true; fatal_msg = e; panic(e) }) + cmdutil.BehaviorOnFatal(func(e string, code int) { sawFatal = true; fatalMsg = e; panic(e) }) cmd.SetArgs(test.args) cmd.Execute() }() if test.expectFatal { - if !saw_fatal { + if !sawFatal { t.Fatalf("%s: unexpected non-error when using %s", test.description, currMethod) } } else { - if saw_fatal { - t.Fatalf("%s: unexpected error when using %s: %s", test.description, currMethod, fatal_msg) + if sawFatal { + t.Fatalf("%s: unexpected error when using %s: %s", test.description, currMethod, fatalMsg) } } diff --git a/pkg/kubectl/cmd/edit/testdata/testcase-edit-from-empty/test.yaml b/pkg/kubectl/cmd/edit/testdata/testcase-edit-from-empty/test.yaml index 1111145af99..7b7ecf8956c 100755 --- a/pkg/kubectl/cmd/edit/testdata/testcase-edit-from-empty/test.yaml +++ b/pkg/kubectl/cmd/edit/testdata/testcase-edit-from-empty/test.yaml @@ -4,7 +4,7 @@ args: - configmap namespace: "edit-test" expectedStderr: -- edit cancelled, no objects found. 
+- edit cancelled, no objects found expectedExitCode: 1 steps: - type: request diff --git a/pkg/kubectl/cmd/exec/exec.go b/pkg/kubectl/cmd/exec/exec.go index 2cf2aa57bb6..9f27a8a79d7 100644 --- a/pkg/kubectl/cmd/exec/exec.go +++ b/pkg/kubectl/cmd/exec/exec.go @@ -39,7 +39,7 @@ import ( ) var ( - exec_example = templates.Examples(i18n.T(` + execExample = templates.Examples(i18n.T(` # Get output from running 'date' from pod 123456-7890, using the first container by default kubectl exec 123456-7890 date @@ -76,7 +76,7 @@ func NewCmdExec(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C DisableFlagsInUseLine: true, Short: i18n.T("Execute a command in a container"), Long: "Execute a command in a container.", - Example: exec_example, + Example: execExample, Run: func(cmd *cobra.Command, args []string) { argsLenAtDash := cmd.ArgsLenAtDash() cmdutil.CheckErr(options.Complete(f, cmd, args, argsLenAtDash)) diff --git a/pkg/kubectl/cmd/explain/explain.go b/pkg/kubectl/cmd/explain/explain.go index 73f6289fb4c..05ab3d89554 100644 --- a/pkg/kubectl/cmd/explain/explain.go +++ b/pkg/kubectl/cmd/explain/explain.go @@ -55,7 +55,7 @@ type ExplainOptions struct { genericclioptions.IOStreams CmdParent string - ApiVersion string + APIVersion string Recursive bool Mapper meta.RESTMapper @@ -77,7 +77,7 @@ func NewCmdExplain(parent string, f cmdutil.Factory, streams genericclioptions.I Use: "explain RESOURCE", DisableFlagsInUseLine: true, Short: i18n.T("Documentation of resources"), - Long: explainLong + "\n\n" + cmdutil.SuggestApiResources(parent), + Long: explainLong + "\n\n" + cmdutil.SuggestAPIResources(parent), Example: explainExamples, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd)) @@ -86,7 +86,7 @@ func NewCmdExplain(parent string, f cmdutil.Factory, streams genericclioptions.I }, } cmd.Flags().BoolVar(&o.Recursive, "recursive", o.Recursive, "Print the fields of fields (Currently only 1 level deep)") - 
cmd.Flags().StringVar(&o.ApiVersion, "api-version", o.ApiVersion, "Get different explanations for particular API version") + cmd.Flags().StringVar(&o.APIVersion, "api-version", o.APIVersion, "Get different explanations for particular API version") return cmd } @@ -106,7 +106,7 @@ func (o *ExplainOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { func (o *ExplainOptions) Validate(args []string) error { if len(args) == 0 { - return fmt.Errorf("You must specify the type of resource to explain. %s\n", cmdutil.SuggestApiResources(o.CmdParent)) + return fmt.Errorf("You must specify the type of resource to explain. %s\n", cmdutil.SuggestAPIResources(o.CmdParent)) } if len(args) > 1 { return fmt.Errorf("We accept only this format: explain RESOURCE\n") @@ -118,7 +118,7 @@ func (o *ExplainOptions) Validate(args []string) error { // Run executes the appropriate steps to print a model's documentation func (o *ExplainOptions) Run(args []string) error { recursive := o.Recursive - apiVersionString := o.ApiVersion + apiVersionString := o.APIVersion // TODO: After we figured out the new syntax to separate group and resource, allow // the users to use it in explain (kubectl explain ). diff --git a/pkg/kubectl/cmd/get/get.go b/pkg/kubectl/cmd/get/get.go index 8011485e412..8489f98185d 100644 --- a/pkg/kubectl/cmd/get/get.go +++ b/pkg/kubectl/cmd/get/get.go @@ -153,7 +153,7 @@ func NewCmdGet(parent string, f cmdutil.Factory, streams genericclioptions.IOStr Use: "get [(-o|--output=)json|yaml|wide|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE[.VERSION][.GROUP] [NAME | -l label] | TYPE[.VERSION][.GROUP]/NAME ...) 
[flags]", DisableFlagsInUseLine: true, Short: i18n.T("Display one or many resources"), - Long: getLong + "\n\n" + cmdutil.SuggestApiResources(parent), + Long: getLong + "\n\n" + cmdutil.SuggestAPIResources(parent), Example: getExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) @@ -261,7 +261,7 @@ func (o *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri o.IncludeUninitialized = cmdutil.ShouldIncludeUninitialized(cmd, len(args) == 2) default: if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { - fmt.Fprintf(o.ErrOut, "You must specify the type of resource to get. %s\n\n", cmdutil.SuggestApiResources(o.CmdParent)) + fmt.Fprintf(o.ErrOut, "You must specify the type of resource to get. %s\n\n", cmdutil.SuggestAPIResources(o.CmdParent)) fullCmdName := cmd.Parent().CommandPath() usageString := "Required resource not specified." if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") { @@ -296,16 +296,20 @@ func (o *GetOptions) Validate(cmd *cobra.Command) error { return nil } +// OriginalPositioner and NopPositioner is required for swap/sort operations of data in table format type OriginalPositioner interface { OriginalPosition(int) int } +// NopPositioner and OriginalPositioner is required for swap/sort operations of data in table format type NopPositioner struct{} +// OriginalPosition returns the original position from NopPositioner object func (t *NopPositioner) OriginalPosition(ix int) int { return ix } +// RuntimeSorter holds the required objects to perform sorting of runtime objects type RuntimeSorter struct { field string decoder runtime.Decoder @@ -313,12 +317,16 @@ type RuntimeSorter struct { positioner OriginalPositioner } +// Sort performs the sorting of runtime objects func (r *RuntimeSorter) Sort() error { - if len(r.objects) <= 1 { - // a list is only considered "sorted" if there are 0 or 1 items in it - // AND (if 1 item) the item is not a Table object + 
// a list is only considered "sorted" if there are 0 or 1 items in it + // AND (if 1 item) the item is not a Table object + if len(r.objects) == 0 { + return nil + } + if len(r.objects) == 1 { _, isTable := r.objects[0].(*metav1beta1.Table) - if len(r.objects) == 0 || !isTable { + if !isTable { return nil } } @@ -331,8 +339,10 @@ func (r *RuntimeSorter) Sort() error { case *metav1beta1.Table: includesTable = true - if err := NewTableSorter(t, r.field).Sort(); err != nil { - continue + if sorter, err := NewTableSorter(t, r.field); err != nil { + return err + } else if err := sorter.Sort(); err != nil { + return err } default: includesRuntimeObjs = true @@ -360,6 +370,7 @@ func (r *RuntimeSorter) Sort() error { return nil } +// OriginalPosition returns the original position of a runtime object func (r *RuntimeSorter) OriginalPosition(ix int) int { if r.positioner == nil { return 0 @@ -367,12 +378,13 @@ func (r *RuntimeSorter) OriginalPosition(ix int) int { return r.positioner.OriginalPosition(ix) } -// allows custom decoder to be set for testing +// WithDecoder allows custom decoder to be set for testing func (r *RuntimeSorter) WithDecoder(decoder runtime.Decoder) *RuntimeSorter { r.decoder = decoder return r } +// NewRuntimeSorter returns a new instance of RuntimeSorter func NewRuntimeSorter(objects []runtime.Object, sortBy string) *RuntimeSorter { parsedField, err := RelaxedJSONPathExpression(sortBy) if err != nil { diff --git a/pkg/kubectl/cmd/get/get_flags.go b/pkg/kubectl/cmd/get/get_flags.go index 93700a85524..fcba4e5aa74 100644 --- a/pkg/kubectl/cmd/get/get_flags.go +++ b/pkg/kubectl/cmd/get/get_flags.go @@ -64,6 +64,7 @@ func (f *PrintFlags) Copy() PrintFlags { return printFlags } +// AllowedFormats is the list of formats in which data can be displayed func (f *PrintFlags) AllowedFormats() []string { formats := f.JSONYamlPrintFlags.AllowedFormats() formats = append(formats, f.NamePrintFlags.AllowedFormats()...) 
diff --git a/pkg/kubectl/cmd/get/get_test.go b/pkg/kubectl/cmd/get/get_test.go index 2c7da6967c8..9d12a895794 100644 --- a/pkg/kubectl/cmd/get/get_test.go +++ b/pkg/kubectl/cmd/get/get_test.go @@ -560,6 +560,15 @@ func TestRuntimeSorter(t *testing.T) { expect string expectError string }{ + { + name: "ensure sorter works with an empty object list", + field: "metadata.name", + objs: []runtime.Object{}, + op: func(sorter *RuntimeSorter, objs []runtime.Object, out io.Writer) error { + return nil + }, + expect: "", + }, { name: "ensure sorter returns original position", field: "metadata.name", diff --git a/pkg/kubectl/cmd/get/humanreadable_flags.go b/pkg/kubectl/cmd/get/humanreadable_flags.go index fa7bbca883a..efd597c4598 100644 --- a/pkg/kubectl/cmd/get/humanreadable_flags.go +++ b/pkg/kubectl/cmd/get/humanreadable_flags.go @@ -61,6 +61,7 @@ func (f *HumanPrintFlags) EnsureWithNamespace() error { return nil } +// AllowedFormats returns more customized formatting options func (f *HumanPrintFlags) AllowedFormats() []string { return []string{"wide"} } diff --git a/pkg/kubectl/cmd/get/sorter.go b/pkg/kubectl/cmd/get/sorter.go index cd8bb28b8d8..d806decd19a 100644 --- a/pkg/kubectl/cmd/get/sorter.go +++ b/pkg/kubectl/cmd/get/sorter.go @@ -318,13 +318,18 @@ func (t *TableSorter) Len() int { func (t *TableSorter) Swap(i, j int) { t.obj.Rows[i], t.obj.Rows[j] = t.obj.Rows[j], t.obj.Rows[i] + t.parsedRows[i], t.parsedRows[j] = t.parsedRows[j], t.parsedRows[i] } func (t *TableSorter) Less(i, j int) bool { iValues := t.parsedRows[i] jValues := t.parsedRows[j] - if len(iValues) == 0 || len(iValues[0]) == 0 || len(jValues) == 0 || len(jValues[0]) == 0 { - klog.Fatalf("couldn't find any field with path %q in the list of objects", t.field) + + if len(iValues) == 0 || len(iValues[0]) == 0 { + return true + } + if len(jValues) == 0 || len(jValues[0]) == 0 { + return false + } iField := iValues[0][0] @@ -342,28 +347,36 @@ func (t *TableSorter) Sort() error { return nil } -func 
NewTableSorter(table *metav1beta1.Table, field string) *TableSorter { +func NewTableSorter(table *metav1beta1.Table, field string) (*TableSorter, error) { var parsedRows [][][]reflect.Value parser := jsonpath.New("sorting").AllowMissingKeys(true) err := parser.Parse(field) if err != nil { - klog.Fatalf("sorting error: %v\n", err) + return nil, fmt.Errorf("sorting error: %v", err) } + fieldFoundOnce := false for i := range table.Rows { parsedRow, err := findJSONPathResults(parser, table.Rows[i].Object.Object) if err != nil { - klog.Fatalf("Failed to get values for %#v using %s (%#v)", parsedRow, field, err) + return nil, fmt.Errorf("Failed to get values for %#v using %s (%#v)", parsedRow, field, err) } parsedRows = append(parsedRows, parsedRow) + if len(parsedRow) > 0 && len(parsedRow[0]) > 0 { + fieldFoundOnce = true + } + } + + if len(table.Rows) > 0 && !fieldFoundOnce { + return nil, fmt.Errorf("couldn't find any field with path %q in the list of objects", field) } return &TableSorter{ obj: table, field: field, parsedRows: parsedRows, - } + }, nil } func findJSONPathResults(parser *jsonpath.JSONPath, from runtime.Object) ([][]reflect.Value, error) { if unstructuredObj, ok := from.(*unstructured.Unstructured); ok { diff --git a/pkg/kubectl/cmd/get/sorter_test.go b/pkg/kubectl/cmd/get/sorter_test.go index c85d5d1bf5a..be2abd57f9b 100644 --- a/pkg/kubectl/cmd/get/sorter_test.go +++ b/pkg/kubectl/cmd/get/sorter_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package get import ( + "encoding/json" "reflect" "strings" "testing" @@ -25,10 +26,20 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" "k8s.io/kubernetes/pkg/kubectl/scheme" ) +func toUnstructuredOrDie(data []byte) *unstructured.Unstructured { + unstrBody := map[string]interface{}{} + err := json.Unmarshal(data, &unstrBody) + if err != nil { + panic(err) + } + return &unstructured.Unstructured{Object: unstrBody} +} func encodeOrDie(obj runtime.Object) []byte { data, err := runtime.Encode(scheme.Codecs.LegacyCodec(corev1.SchemeGroupVersion), obj) if err != nil { @@ -65,6 +76,16 @@ func TestSortingPrinter(t *testing.T) { name string expectedErr string }{ + { + name: "empty", + obj: &corev1.PodList{ + Items: []corev1.Pod{}, + }, + sort: &corev1.PodList{ + Items: []corev1.Pod{}, + }, + field: "{.metadata.name}", + }, { name: "in-order-already", obj: &corev1.PodList{ @@ -237,16 +258,16 @@ func TestSortingPrinter(t *testing.T) { name: "v1.List in order", obj: &corev1.List{ Items: []runtime.RawExtension{ - {Raw: encodeOrDie(a)}, - {Raw: encodeOrDie(b)}, - {Raw: encodeOrDie(c)}, + {Object: a, Raw: encodeOrDie(a)}, + {Object: b, Raw: encodeOrDie(b)}, + {Object: c, Raw: encodeOrDie(c)}, }, }, sort: &corev1.List{ Items: []runtime.RawExtension{ - {Raw: encodeOrDie(a)}, - {Raw: encodeOrDie(b)}, - {Raw: encodeOrDie(c)}, + {Object: a, Raw: encodeOrDie(a)}, + {Object: b, Raw: encodeOrDie(b)}, + {Object: c, Raw: encodeOrDie(c)}, }, }, field: "{.metadata.name}", @@ -255,16 +276,16 @@ func TestSortingPrinter(t *testing.T) { name: "v1.List in reverse", obj: &corev1.List{ Items: []runtime.RawExtension{ - {Raw: encodeOrDie(c)}, - {Raw: encodeOrDie(b)}, - {Raw: encodeOrDie(a)}, + {Object: c, Raw: encodeOrDie(c)}, + {Object: b, Raw: encodeOrDie(b)}, + {Object: a, 
Raw: encodeOrDie(a)}, }, }, sort: &corev1.List{ Items: []runtime.RawExtension{ - {Raw: encodeOrDie(a)}, - {Raw: encodeOrDie(b)}, - {Raw: encodeOrDie(c)}, + {Object: a, Raw: encodeOrDie(a)}, + {Object: b, Raw: encodeOrDie(b)}, + {Object: c, Raw: encodeOrDie(c)}, }, }, field: "{.metadata.name}", @@ -390,6 +411,43 @@ func TestSortingPrinter(t *testing.T) { }, } for _, tt := range tests { + t.Run(tt.name+" table", func(t *testing.T) { + table := &metav1beta1.Table{} + meta.EachListItem(tt.obj, func(item runtime.Object) error { + table.Rows = append(table.Rows, metav1beta1.TableRow{ + Object: runtime.RawExtension{Object: toUnstructuredOrDie(encodeOrDie(item))}, + }) + return nil + }) + + expectedTable := &metav1beta1.Table{} + meta.EachListItem(tt.sort, func(item runtime.Object) error { + expectedTable.Rows = append(expectedTable.Rows, metav1beta1.TableRow{ + Object: runtime.RawExtension{Object: toUnstructuredOrDie(encodeOrDie(item))}, + }) + return nil + }) + + sorter, err := NewTableSorter(table, tt.field) + if err == nil { + err = sorter.Sort() + } + if err != nil { + if len(tt.expectedErr) > 0 { + if strings.Contains(err.Error(), tt.expectedErr) { + return + } + t.Fatalf("%s: expected error containing: %q, got: \"%v\"", tt.name, tt.expectedErr, err) + } + t.Fatalf("%s: unexpected error: %v", tt.name, err) + } + if len(tt.expectedErr) > 0 { + t.Fatalf("%s: expected error containing: %q, got none", tt.name, tt.expectedErr) + } + if !reflect.DeepEqual(table, expectedTable) { + t.Errorf("[%s]\nexpected/saw:\n%s", tt.name, diff.ObjectReflectDiff(expectedTable, table)) + } + }) t.Run(tt.name, func(t *testing.T) { sort := &SortingPrinter{SortField: tt.field, Decoder: scheme.Codecs.UniversalDecoder()} err := sort.sortObj(tt.obj) diff --git a/pkg/kubectl/cmd/label/label.go b/pkg/kubectl/cmd/label/label.go index 11251a278d2..a342b90c92f 100644 --- a/pkg/kubectl/cmd/label/label.go +++ b/pkg/kubectl/cmd/label/label.go @@ -344,8 +344,7 @@ func (o *LabelOptions) RunLabel() error 
{ if err != nil { return err } - printer.PrintObj(info.Object, o.Out) - return nil + return printer.PrintObj(info.Object, o.Out) }) } diff --git a/pkg/kubectl/cmd/plugin/plugin.go b/pkg/kubectl/cmd/plugin/plugin.go index 528ede0e70c..7196804cafe 100644 --- a/pkg/kubectl/cmd/plugin/plugin.go +++ b/pkg/kubectl/cmd/plugin/plugin.go @@ -34,13 +34,13 @@ import ( ) var ( - plugin_long = templates.LongDesc(` + pluginLong = templates.LongDesc(` Provides utilities for interacting with plugins. Plugins provide extended functionality that is not part of the major command-line distribution. Please refer to the documentation and examples for more information about how write your own plugins.`) - plugin_list_long = templates.LongDesc(` + pluginListLong = templates.LongDesc(` List all available plugin files on a user's PATH. Available plugin files are those that are: @@ -55,7 +55,7 @@ func NewCmdPlugin(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra Use: "plugin [flags]", DisableFlagsInUseLine: true, Short: i18n.T("Provides utilities for interacting with plugins."), - Long: plugin_long, + Long: pluginLong, Run: func(cmd *cobra.Command, args []string) { cmdutil.DefaultSubCommandRun(streams.ErrOut)(cmd, args) }, @@ -81,7 +81,7 @@ func NewCmdPluginList(f cmdutil.Factory, streams genericclioptions.IOStreams) *c cmd := &cobra.Command{ Use: "list", Short: "list all visible plugin executables on a user's PATH", - Long: plugin_list_long, + Long: pluginListLong, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(cmd)) cmdutil.CheckErr(o.Run()) diff --git a/pkg/kubectl/cmd/rollout/rollout.go b/pkg/kubectl/cmd/rollout/rollout.go index 38b2c529753..f724a3063d3 100644 --- a/pkg/kubectl/cmd/rollout/rollout.go +++ b/pkg/kubectl/cmd/rollout/rollout.go @@ -27,17 +27,17 @@ import ( ) var ( - rollout_long = templates.LongDesc(` - Manage the rollout of a resource.` + rollout_valid_resources) + rolloutLong = templates.LongDesc(` + Manage the rollout of a 
resource.` + rolloutValidResources) - rollout_example = templates.Examples(` + rolloutExample = templates.Examples(` # Rollback to the previous deployment kubectl rollout undo deployment/abc # Check the rollout status of a daemonset kubectl rollout status daemonset/foo`) - rollout_valid_resources = dedent.Dedent(` + rolloutValidResources = dedent.Dedent(` Valid resource types include: * deployments @@ -46,13 +46,14 @@ var ( `) ) +// NewCmdRollout returns a Command instance for 'rollout' sub command func NewCmdRollout(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { cmd := &cobra.Command{ Use: "rollout SUBCOMMAND", DisableFlagsInUseLine: true, Short: i18n.T("Manage the rollout of a resource"), - Long: rollout_long, - Example: rollout_example, + Long: rolloutLong, + Example: rolloutExample, Run: cmdutil.DefaultSubCommandRun(streams.Out), } // subcommands diff --git a/pkg/kubectl/cmd/rollout/rollout_history.go b/pkg/kubectl/cmd/rollout/rollout_history.go index 009c8d76ca4..d9245508184 100644 --- a/pkg/kubectl/cmd/rollout/rollout_history.go +++ b/pkg/kubectl/cmd/rollout/rollout_history.go @@ -32,10 +32,10 @@ import ( ) var ( - history_long = templates.LongDesc(` + historyLong = templates.LongDesc(` View previous rollout revisions and configurations.`) - history_example = templates.Examples(` + historyExample = templates.Examples(` # View the rollout history of a deployment kubectl rollout history deployment/abc @@ -43,6 +43,7 @@ var ( kubectl rollout history daemonset/abc --revision=3`) ) +// RolloutHistoryOptions holds the options for 'rollout history' sub command type RolloutHistoryOptions struct { PrintFlags *genericclioptions.PrintFlags ToPrinter func(string) (printers.ResourcePrinter, error) @@ -61,6 +62,7 @@ type RolloutHistoryOptions struct { genericclioptions.IOStreams } +// NewRolloutHistoryOptions returns an initialized RolloutHistoryOptions instance func NewRolloutHistoryOptions(streams genericclioptions.IOStreams) 
*RolloutHistoryOptions { return &RolloutHistoryOptions{ PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme), @@ -68,6 +70,7 @@ func NewRolloutHistoryOptions(streams genericclioptions.IOStreams) *RolloutHisto } } +// NewCmdRolloutHistory returns a Command instance for RolloutHistory sub command func NewCmdRolloutHistory(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewRolloutHistoryOptions(streams) @@ -77,8 +80,8 @@ func NewCmdRolloutHistory(f cmdutil.Factory, streams genericclioptions.IOStreams Use: "history (TYPE NAME | TYPE/NAME) [flags]", DisableFlagsInUseLine: true, Short: i18n.T("View rollout history"), - Long: history_long, - Example: history_example, + Long: historyLong, + Example: historyExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -97,6 +100,7 @@ func NewCmdRolloutHistory(f cmdutil.Factory, streams genericclioptions.IOStreams return cmd } +// Complete completes all the required options func (o *RolloutHistoryOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { o.Resources = args @@ -117,6 +121,7 @@ func (o *RolloutHistoryOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, return nil } +// Validate makes sure all the provided values for command-line options are valid func (o *RolloutHistoryOptions) Validate() error { if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { return fmt.Errorf("required resource not specified") } @@ -128,6 +133,7 @@ func (o *RolloutHistoryOptions) Validate() error { return nil } +// Run performs the execution of 'rollout history' sub command func (o *RolloutHistoryOptions) Run() error { r := o.Builder(). 
diff --git a/pkg/kubectl/cmd/rollout/rollout_pause.go b/pkg/kubectl/cmd/rollout/rollout_pause.go index 70c5af4a698..b29963d6497 100644 --- a/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -51,20 +51,21 @@ type PauseOptions struct { } var ( - pause_long = templates.LongDesc(` + pauseLong = templates.LongDesc(` Mark the provided resource as paused Paused resources will not be reconciled by a controller. Use "kubectl rollout resume" to resume a paused resource. Currently only deployments support being paused.`) - pause_example = templates.Examples(` + pauseExample = templates.Examples(` # Mark the nginx deployment as paused. Any current state of # the deployment will continue its function, new updates to the deployment will not # have an effect as long as the deployment is paused. kubectl rollout pause deployment/nginx`) ) +// NewCmdRolloutPause returns a Command instance for 'rollout pause' sub command func NewCmdRolloutPause(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := &PauseOptions{ PrintFlags: genericclioptions.NewPrintFlags("paused").WithTypeSetter(scheme.Scheme), @@ -77,8 +78,8 @@ func NewCmdRolloutPause(f cmdutil.Factory, streams genericclioptions.IOStreams) Use: "pause RESOURCE", DisableFlagsInUseLine: true, Short: i18n.T("Mark the provided resource as paused"), - Long: pause_long, - Example: pause_example, + Long: pauseLong, + Example: pauseExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -94,6 +95,7 @@ func NewCmdRolloutPause(f cmdutil.Factory, streams genericclioptions.IOStreams) return cmd } +// Complete completes all the required options func (o *PauseOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { o.Pauser = polymorphichelpers.ObjectPauserFn @@ -121,7 +123,8 @@ func (o *PauseOptions) Validate() error { return nil } -func (o PauseOptions) RunPause() error { +// 
RunPause performs the execution of 'rollout pause' sub command +func (o *PauseOptions) RunPause() error { r := o.Builder(). WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). NamespaceParam(o.Namespace).DefaultNamespace(). diff --git a/pkg/kubectl/cmd/rollout/rollout_resume.go b/pkg/kubectl/cmd/rollout/rollout_resume.go index e97c807173c..f1f9e4e33db 100644 --- a/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -52,18 +52,19 @@ type ResumeOptions struct { } var ( - resume_long = templates.LongDesc(` + resumeLong = templates.LongDesc(` Resume a paused resource Paused resources will not be reconciled by a controller. By resuming a resource, we allow it to be reconciled again. Currently only deployments support being resumed.`) - resume_example = templates.Examples(` + resumeExample = templates.Examples(` # Resume an already paused deployment kubectl rollout resume deployment/nginx`) ) +// NewRolloutResumeOptions returns an initialized ResumeOptions instance func NewRolloutResumeOptions(streams genericclioptions.IOStreams) *ResumeOptions { return &ResumeOptions{ PrintFlags: genericclioptions.NewPrintFlags("resumed").WithTypeSetter(scheme.Scheme), @@ -71,6 +72,7 @@ func NewRolloutResumeOptions(streams genericclioptions.IOStreams) *ResumeOptions } } +// NewCmdRolloutResume returns a Command instance for 'rollout resume' sub command func NewCmdRolloutResume(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewRolloutResumeOptions(streams) @@ -80,8 +82,8 @@ func NewCmdRolloutResume(f cmdutil.Factory, streams genericclioptions.IOStreams) Use: "resume RESOURCE", DisableFlagsInUseLine: true, Short: i18n.T("Resume a paused resource"), - Long: resume_long, - Example: resume_example, + Long: resumeLong, + Example: resumeExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -96,6 +98,7 @@ func 
NewCmdRolloutResume(f cmdutil.Factory, streams genericclioptions.IOStreams) return cmd } +// Complete completes all the required options func (o *ResumeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { o.Resources = args @@ -124,6 +127,7 @@ func (o *ResumeOptions) Validate() error { return nil } +// RunResume performs the execution of 'rollout resume' sub command func (o ResumeOptions) RunResume() error { r := o.Builder(). WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). diff --git a/pkg/kubectl/cmd/rollout/rollout_status.go b/pkg/kubectl/cmd/rollout/rollout_status.go index 311c9c88d90..fde72df1d8b 100644 --- a/pkg/kubectl/cmd/rollout/rollout_status.go +++ b/pkg/kubectl/cmd/rollout/rollout_status.go @@ -45,7 +45,7 @@ import ( ) var ( - status_long = templates.LongDesc(` + statusLong = templates.LongDesc(` Show the status of the rollout. By default 'rollout status' will watch the status of the latest rollout @@ -55,11 +55,12 @@ var ( pin to a specific revision and abort if it is rolled over by another revision, use --revision=N where N is the revision you need to watch for.`) - status_example = templates.Examples(` + statusExample = templates.Examples(` # Watch the rollout status of a deployment kubectl rollout status deployment/nginx`) ) +// RolloutStatusOptions holds the command-line options for 'rollout status' sub command type RolloutStatusOptions struct { PrintFlags *genericclioptions.PrintFlags @@ -79,6 +80,7 @@ type RolloutStatusOptions struct { genericclioptions.IOStreams } +// NewRolloutStatusOptions returns an initialized RolloutStatusOptions instance func NewRolloutStatusOptions(streams genericclioptions.IOStreams) *RolloutStatusOptions { return &RolloutStatusOptions{ PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme), @@ -89,6 +91,7 @@ func NewRolloutStatusOptions(streams genericclioptions.IOStreams) *RolloutStatus } } +// NewCmdRolloutStatus returns a Command instance 
for the 'rollout status' sub command func NewCmdRolloutStatus(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewRolloutStatusOptions(streams) @@ -98,8 +101,8 @@ func NewCmdRolloutStatus(f cmdutil.Factory, streams genericclioptions.IOStreams) Use: "status (TYPE NAME | TYPE/NAME) [flags]", DisableFlagsInUseLine: true, Short: i18n.T("Show the status of the rollout"), - Long: status_long, - Example: status_example, + Long: statusLong, + Example: statusExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, args)) cmdutil.CheckErr(o.Validate()) @@ -117,6 +120,7 @@ func NewCmdRolloutStatus(f cmdutil.Factory, streams genericclioptions.IOStreams) return cmd } +// Complete completes all the required options func (o *RolloutStatusOptions) Complete(f cmdutil.Factory, args []string) error { o.Builder = f.NewBuilder @@ -142,6 +146,7 @@ func (o *RolloutStatusOptions) Complete(f cmdutil.Factory, args []string) error return nil } +// Validate makes sure all the provided values for command-line options are valid func (o *RolloutStatusOptions) Validate() error { if len(o.BuilderArgs) == 0 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames) { return fmt.Errorf("required resource not specified") @@ -154,6 +159,7 @@ func (o *RolloutStatusOptions) Validate() error { return nil } +// Run performs the execution of 'rollout status' sub command func (o *RolloutStatusOptions) Run() error { r := o.Builder(). WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). 
diff --git a/pkg/kubectl/cmd/rollout/rollout_undo.go b/pkg/kubectl/cmd/rollout/rollout_undo.go index 2ab7a028871..c4db88af208 100644 --- a/pkg/kubectl/cmd/rollout/rollout_undo.go +++ b/pkg/kubectl/cmd/rollout/rollout_undo.go @@ -50,10 +50,10 @@ type UndoOptions struct { } var ( - undo_long = templates.LongDesc(` + undoLong = templates.LongDesc(` Rollback to a previous rollout.`) - undo_example = templates.Examples(` + undoExample = templates.Examples(` # Rollback to the previous deployment kubectl rollout undo deployment/abc @@ -64,6 +64,7 @@ var ( kubectl rollout undo --dry-run=true deployment/abc`) ) +// NewRolloutUndoOptions returns an initialized UndoOptions instance func NewRolloutUndoOptions(streams genericclioptions.IOStreams) *UndoOptions { return &UndoOptions{ PrintFlags: genericclioptions.NewPrintFlags("rolled back").WithTypeSetter(scheme.Scheme), @@ -72,6 +73,7 @@ func NewRolloutUndoOptions(streams genericclioptions.IOStreams) *UndoOptions { } } +// NewCmdRolloutUndo returns a Command instance for the 'rollout undo' sub command func NewCmdRolloutUndo(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewRolloutUndoOptions(streams) @@ -81,8 +83,8 @@ func NewCmdRolloutUndo(f cmdutil.Factory, streams genericclioptions.IOStreams) * Use: "undo (TYPE NAME | TYPE/NAME) [flags]", DisableFlagsInUseLine: true, Short: i18n.T("Undo a previous rollout"), - Long: undo_long, - Example: undo_example, + Long: undoLong, + Example: undoExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -99,6 +101,7 @@ func NewCmdRolloutUndo(f cmdutil.Factory, streams genericclioptions.IOStreams) * return cmd } +// Complete completes all the required options func (o *UndoOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { o.Resources = args o.DryRun = cmdutil.GetDryRunFlag(cmd) @@ -129,6 +132,7 @@ func (o *UndoOptions) Validate() error { return nil } +// 
RunUndo performs the execution of 'rollout undo' sub command func (o *UndoOptions) RunUndo() error { r := o.Builder(). WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). diff --git a/pkg/kubectl/cmd/run/run.go b/pkg/kubectl/cmd/run/run.go index fdd47b287a8..3fc4bf65fbd 100644 --- a/pkg/kubectl/cmd/run/run.go +++ b/pkg/kubectl/cmd/run/run.go @@ -360,9 +360,9 @@ func (o *RunOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e runObject, err := o.createGeneratedObject(f, cmd, generator, names, params, cmdutil.GetFlagString(cmd, "overrides"), namespace) if err != nil { return err - } else { - createdObjects = append(createdObjects, runObject) } + createdObjects = append(createdObjects, runObject) + allErrs := []error{} if o.Expose { serviceGenerator := cmdutil.GetFlagString(cmd, "service-generator") @@ -567,9 +567,8 @@ func getRestartPolicy(cmd *cobra.Command, interactive bool) (corev1.RestartPolic if len(restart) == 0 { if interactive { return corev1.RestartPolicyOnFailure, nil - } else { - return corev1.RestartPolicyAlways, nil } + return corev1.RestartPolicyAlways, nil } switch corev1.RestartPolicy(restart) { case corev1.RestartPolicyAlways: diff --git a/pkg/kubectl/cmd/set/set.go b/pkg/kubectl/cmd/set/set.go index a50b7f95374..0617f88caec 100644 --- a/pkg/kubectl/cmd/set/set.go +++ b/pkg/kubectl/cmd/set/set.go @@ -25,18 +25,19 @@ import ( ) var ( - set_long = templates.LongDesc(` + setLong = templates.LongDesc(` Configure application resources These commands help you make changes to existing application resources.`) ) +// NewCmdSet returns an initialized Command instance for 'set' sub command func NewCmdSet(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { cmd := &cobra.Command{ Use: "set SUBCOMMAND", DisableFlagsInUseLine: true, Short: i18n.T("Set specific features on objects"), - Long: set_long, + Long: setLong, Run: cmdutil.DefaultSubCommandRun(streams.ErrOut), } diff --git 
a/pkg/kubectl/cmd/set/set_env.go b/pkg/kubectl/cmd/set/set_env.go index 62aed433dfe..0cc00cbcfc6 100644 --- a/pkg/kubectl/cmd/set/set_env.go +++ b/pkg/kubectl/cmd/set/set_env.go @@ -97,6 +97,7 @@ var ( env | grep RAILS_ | kubectl set env -e - deployment/registry`) ) +// EnvOptions holds values for 'set env' command-line options type EnvOptions struct { PrintFlags *genericclioptions.PrintFlags resource.FilenameOptions @@ -202,6 +203,7 @@ func contains(key string, keyList []string) bool { return false } +// Complete completes all required options func (o *EnvOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { if o.All && len(o.Selector) > 0 { return fmt.Errorf("cannot set --all and --selector at the same time") @@ -242,6 +244,7 @@ func (o *EnvOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri return nil } +// Validate makes sure provided values for EnvOptions are valid func (o *EnvOptions) Validate() error { if len(o.Filenames) == 0 && len(o.resources) < 1 { return fmt.Errorf("one or more resources must be specified as or /") @@ -471,7 +474,7 @@ func (o *EnvOptions) RunEnv() error { for _, patch := range patches { info := patch.Info if patch.Err != nil { - allErrs = append(allErrs, fmt.Errorf("error: %s/%s %v\n", info.Mapping.Resource, info.Name, patch.Err)) + allErrs = append(allErrs, fmt.Errorf("error: %s/%s %v", info.Mapping.Resource, info.Name, patch.Err)) continue } @@ -489,7 +492,7 @@ func (o *EnvOptions) RunEnv() error { actual, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { - allErrs = append(allErrs, fmt.Errorf("failed to patch env update to pod template: %v\n", err)) + allErrs = append(allErrs, fmt.Errorf("failed to patch env update to pod template: %v", err)) continue } diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index 38677a803f0..5fe8b0516f2 100644 --- 
a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -63,16 +63,16 @@ type SetImageOptions struct { } var ( - image_resources = ` + imageResources = ` pod (po), replicationcontroller (rc), deployment (deploy), daemonset (ds), replicaset (rs)` - image_long = templates.LongDesc(` + imageLong = templates.LongDesc(` Update existing container image(s) of resources. Possible resources include (case insensitive): - ` + image_resources) + ` + imageResources) - image_example = templates.Examples(` + imageExample = templates.Examples(` # Set a deployment's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'. kubectl set image deployment/nginx busybox=busybox nginx=nginx:1.9.1 @@ -86,6 +86,7 @@ var ( kubectl set image -f path/to/file.yaml nginx=nginx:1.9.1 --local -o yaml`) ) +// NewImageOptions returns an initialized SetImageOptions instance func NewImageOptions(streams genericclioptions.IOStreams) *SetImageOptions { return &SetImageOptions{ PrintFlags: genericclioptions.NewPrintFlags("image updated").WithTypeSetter(scheme.Scheme), @@ -97,6 +98,7 @@ func NewImageOptions(streams genericclioptions.IOStreams) *SetImageOptions { } } +// NewCmdImage returns an initialized Command instance for the 'set image' sub command func NewCmdImage(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewImageOptions(streams) @@ -104,8 +106,8 @@ func NewCmdImage(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra. Use: "image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... 
CONTAINER_NAME_N=CONTAINER_IMAGE_N", DisableFlagsInUseLine: true, Short: i18n.T("Update image of a pod template"), - Long: image_long, - Example: image_example, + Long: imageLong, + Example: imageExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -126,6 +128,7 @@ func NewCmdImage(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra. return cmd } +// Complete completes all required options func (o *SetImageOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error @@ -191,6 +194,7 @@ func (o *SetImageOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ return nil } +// Validate makes sure provided values in SetImageOptions are valid func (o *SetImageOptions) Validate() error { errors := []error{} if o.All && len(o.Selector) > 0 { @@ -207,6 +211,7 @@ func (o *SetImageOptions) Validate() error { return utilerrors.NewAggregate(errors) } +// Run performs the execution of 'set image' sub command func (o *SetImageOptions) Run() error { allErrs := []error{} @@ -265,7 +270,7 @@ func (o *SetImageOptions) Run() error { for _, patch := range patches { info := patch.Info if patch.Err != nil { - allErrs = append(allErrs, fmt.Errorf("error: %s/%s %v\n", info.Mapping.Resource, info.Name, patch.Err)) + allErrs = append(allErrs, fmt.Errorf("error: %s/%s %v", info.Mapping.Resource, info.Name, patch.Err)) continue } @@ -284,7 +289,7 @@ func (o *SetImageOptions) Run() error { // patch the change actual, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { - allErrs = append(allErrs, fmt.Errorf("failed to patch image update to pod template: %v\n", err)) + allErrs = append(allErrs, fmt.Errorf("failed to patch image update to pod template: %v", err)) continue } diff --git a/pkg/kubectl/cmd/set/set_resources.go b/pkg/kubectl/cmd/set/set_resources.go index 
b7328a0cb45..e4e2dd14bba 100644 --- a/pkg/kubectl/cmd/set/set_resources.go +++ b/pkg/kubectl/cmd/set/set_resources.go @@ -38,14 +38,14 @@ import ( ) var ( - resources_long = templates.LongDesc(` + resourcesLong = templates.LongDesc(` Specify compute resource requirements (cpu, memory) for any resource that defines a pod template. If a pod is successfully scheduled, it is guaranteed the amount of resource requested, but may burst up to its specified limits. for each compute resource, if a limit is specified and a request is omitted, the request will default to the limit. Possible resources include (case insensitive): %s.`) - resources_example = templates.Examples(` + resourcesExample = templates.Examples(` # Set a deployments nginx container cpu limits to "200m" and memory to "512Mi" kubectl set resources deployment nginx -c=nginx --limits=cpu=200m,memory=512Mi @@ -59,7 +59,7 @@ var ( kubectl set resources -f path/to/file.yaml --limits=cpu=200m,memory=512Mi --local -o yaml`) ) -// ResourcesOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of +// SetResourcesOptions is the start of the data required to perform the operation. 
As new fields are added, add them here instead of // referencing the cmd.Flags type SetResourcesOptions struct { resource.FilenameOptions @@ -104,6 +104,7 @@ func NewResourcesOptions(streams genericclioptions.IOStreams) *SetResourcesOptio } } +// NewCmdResources returns initialized Command instance for the 'set resources' sub command func NewCmdResources(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewResourcesOptions(streams) @@ -111,8 +112,8 @@ func NewCmdResources(f cmdutil.Factory, streams genericclioptions.IOStreams) *co Use: "resources (-f FILENAME | TYPE NAME) ([--limits=LIMITS & --requests=REQUESTS]", DisableFlagsInUseLine: true, Short: i18n.T("Update resource requests/limits on objects with pod templates"), - Long: fmt.Sprintf(resources_long, cmdutil.SuggestApiResources("kubectl")), - Example: resources_example, + Long: fmt.Sprintf(resourcesLong, cmdutil.SuggestAPIResources("kubectl")), + Example: resourcesExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -138,6 +139,7 @@ func NewCmdResources(f cmdutil.Factory, streams genericclioptions.IOStreams) *co return cmd } +// Complete completes all required options func (o *SetResourcesOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error @@ -197,6 +199,7 @@ func (o *SetResourcesOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, ar return nil } +// Validate makes sure that provided values in ResourcesOptions are valid func (o *SetResourcesOptions) Validate() error { var err error if o.All && len(o.Selector) > 0 { @@ -214,6 +217,7 @@ func (o *SetResourcesOptions) Validate() error { return nil } +// Run performs the execution of 'set resources' sub command func (o *SetResourcesOptions) Run() error { allErrs := []error{} patches := CalculatePatches(o.Infos, scheme.DefaultJSONEncoder(), func(obj runtime.Object) ([]byte, error) { @@ -259,13 +263,13 @@ func (o 
*SetResourcesOptions) Run() error { for _, patch := range patches { info := patch.Info if patch.Err != nil { - allErrs = append(allErrs, fmt.Errorf("error: %s/%s %v\n", info.Mapping.Resource, info.Name, patch.Err)) + allErrs = append(allErrs, fmt.Errorf("error: %s/%s %v", info.Mapping.Resource, info.Name, patch.Err)) continue } //no changes if string(patch.Patch) == "{}" || len(patch.Patch) == 0 { - allErrs = append(allErrs, fmt.Errorf("info: %s %q was not changed\n", info.Mapping.Resource, info.Name)) + allErrs = append(allErrs, fmt.Errorf("info: %s %q was not changed", info.Mapping.Resource, info.Name)) continue } @@ -278,7 +282,7 @@ func (o *SetResourcesOptions) Run() error { actual, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { - allErrs = append(allErrs, fmt.Errorf("failed to patch limit update to pod template %v\n", err)) + allErrs = append(allErrs, fmt.Errorf("failed to patch limit update to pod template %v", err)) continue } diff --git a/pkg/kubectl/cmd/set/set_selector.go b/pkg/kubectl/cmd/set/set_selector.go index 167ae747170..ddc4ba489b8 100644 --- a/pkg/kubectl/cmd/set/set_selector.go +++ b/pkg/kubectl/cmd/set/set_selector.go @@ -36,7 +36,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/util/templates" ) -// SelectorOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of +// SetSelectorOptions is the start of the data required to perform the operation. 
As new fields are added, add them here instead of // referencing the cmd.Flags() type SetSelectorOptions struct { // Bound @@ -73,6 +73,7 @@ var ( kubectl create deployment my-dep -o yaml --dry-run | kubectl label --local -f - environment=qa -o yaml | kubectl create -f -`) ) +// NewSelectorOptions returns an initialized SelectorOptions instance func NewSelectorOptions(streams genericclioptions.IOStreams) *SetSelectorOptions { return &SetSelectorOptions{ ResourceBuilderFlags: genericclioptions.NewResourceBuilderFlags(). diff --git a/pkg/kubectl/cmd/set/set_serviceaccount.go b/pkg/kubectl/cmd/set/set_serviceaccount.go index eced76f7e9c..b0d58527090 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount.go @@ -56,7 +56,7 @@ var ( `)) ) -// serviceAccountConfig encapsulates the data required to perform the operation. +// SetServiceAccountOptions encapsulates the data required to perform the operation. type SetServiceAccountOptions struct { PrintFlags *genericclioptions.PrintFlags RecordFlags *genericclioptions.RecordFlags @@ -77,6 +77,7 @@ type SetServiceAccountOptions struct { genericclioptions.IOStreams } +// NewSetServiceAccountOptions returns an initialized SetServiceAccountOptions instance func NewSetServiceAccountOptions(streams genericclioptions.IOStreams) *SetServiceAccountOptions { return &SetServiceAccountOptions{ PrintFlags: genericclioptions.NewPrintFlags("serviceaccount updated").WithTypeSetter(scheme.Scheme), @@ -193,7 +194,7 @@ func (o *SetServiceAccountOptions) Run() error { for _, patch := range patches { info := patch.Info if patch.Err != nil { - patchErrs = append(patchErrs, fmt.Errorf("error: %s/%s %v\n", info.Mapping.Resource, info.Name, patch.Err)) + patchErrs = append(patchErrs, fmt.Errorf("error: %s/%s %v", info.Mapping.Resource, info.Name, patch.Err)) continue } if o.local || o.dryRun { diff --git a/pkg/kubectl/cmd/set/set_subject.go b/pkg/kubectl/cmd/set/set_subject.go index a4a245e2855..a18032d9031 
100644 --- a/pkg/kubectl/cmd/set/set_subject.go +++ b/pkg/kubectl/cmd/set/set_subject.go @@ -37,10 +37,10 @@ import ( ) var ( - subject_long = templates.LongDesc(` + subjectLong = templates.LongDesc(` Update User, Group or ServiceAccount in a RoleBinding/ClusterRoleBinding.`) - subject_example = templates.Examples(` + subjectExample = templates.Examples(` # Update a ClusterRoleBinding for serviceaccount1 kubectl set subject clusterrolebinding admin --serviceaccount=namespace:serviceaccount1 @@ -79,6 +79,7 @@ type SubjectOptions struct { genericclioptions.IOStreams } +// NewSubjectOptions returns an initialized SubjectOptions instance func NewSubjectOptions(streams genericclioptions.IOStreams) *SubjectOptions { return &SubjectOptions{ PrintFlags: genericclioptions.NewPrintFlags("subjects updated").WithTypeSetter(scheme.Scheme), @@ -87,14 +88,15 @@ func NewSubjectOptions(streams genericclioptions.IOStreams) *SubjectOptions { } } +// NewCmdSubject returns the "set subject" sub command func NewCmdSubject(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewSubjectOptions(streams) cmd := &cobra.Command{ Use: "subject (-f FILENAME | TYPE NAME) [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", DisableFlagsInUseLine: true, Short: i18n.T("Update User, Group or ServiceAccount in a RoleBinding/ClusterRoleBinding"), - Long: subject_long, - Example: subject_example, + Long: subjectLong, + Example: subjectExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -116,6 +118,7 @@ func NewCmdSubject(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobr return cmd } +// Complete completes all required options func (o *SubjectOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { o.Output = cmdutil.GetFlagString(cmd, "output") o.DryRun = cmdutil.GetDryRunFlag(cmd) @@ -167,6 +170,7 @@ func (o 
*SubjectOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [] return nil } +// Validate makes sure provided values in SubjectOptions are valid func (o *SubjectOptions) Validate() error { if o.All && len(o.Selector) > 0 { return fmt.Errorf("cannot set --all and --selector at the same time") @@ -192,6 +196,7 @@ func (o *SubjectOptions) Validate() error { return nil } +// Run performs the execution of "set subject" sub command func (o *SubjectOptions) Run(fn updateSubjects) error { patches := CalculatePatches(o.Infos, scheme.DefaultJSONEncoder(), func(obj runtime.Object) ([]byte, error) { subjects := []rbacv1.Subject{} @@ -238,13 +243,13 @@ func (o *SubjectOptions) Run(fn updateSubjects) error { for _, patch := range patches { info := patch.Info if patch.Err != nil { - allErrs = append(allErrs, fmt.Errorf("error: %s/%s %v\n", info.Mapping.Resource, info.Name, patch.Err)) + allErrs = append(allErrs, fmt.Errorf("error: %s/%s %v", info.Mapping.Resource, info.Name, patch.Err)) continue } //no changes if string(patch.Patch) == "{}" || len(patch.Patch) == 0 { - allErrs = append(allErrs, fmt.Errorf("info: %s %q was not changed\n", info.Mapping.Resource, info.Name)) + allErrs = append(allErrs, fmt.Errorf("info: %s %q was not changed", info.Mapping.Resource, info.Name)) continue } @@ -257,7 +262,7 @@ func (o *SubjectOptions) Run(fn updateSubjects) error { actual, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { - allErrs = append(allErrs, fmt.Errorf("failed to patch subjects to rolebinding: %v\n", err)) + allErrs = append(allErrs, fmt.Errorf("failed to patch subjects to rolebinding: %v", err)) continue } diff --git a/pkg/kubectl/cmd/taint/taint_test.go b/pkg/kubectl/cmd/taint/taint_test.go index 8e68516730c..3b4a9f540e5 100644 --- a/pkg/kubectl/cmd/taint/taint_test.go +++ b/pkg/kubectl/cmd/taint/taint_test.go @@ -237,7 +237,7 @@ func TestTaint(t *testing.T) { for _, 
test := range tests { t.Run(test.description, func(t *testing.T) { oldNode, expectNewNode := generateNodeAndTaintedNode(test.oldTaints, test.newTaints) - new_node := &corev1.Node{} + newNode := &corev1.Node{} tainted := false tf := cmdtesting.NewTestFactory() defer tf.Cleanup() @@ -274,13 +274,13 @@ func TestTaint(t *testing.T) { } // decode the patch - if err := runtime.DecodeInto(codec, appliedPatch, new_node); err != nil { + if err := runtime.DecodeInto(codec, appliedPatch, newNode); err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } - if !equalTaints(expectNewNode.Spec.Taints, new_node.Spec.Taints) { - t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, expectNewNode.Spec.Taints, new_node.Spec.Taints) + if !equalTaints(expectNewNode.Spec.Taints, newNode.Spec.Taints) { + t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, expectNewNode.Spec.Taints, newNode.Spec.Taints) } - return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, new_node)}, nil + return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, newNode)}, nil case m.isFor("PUT", "/nodes/node-name"): tainted = true data, err := ioutil.ReadAll(req.Body) @@ -288,13 +288,13 @@ func TestTaint(t *testing.T) { t.Fatalf("%s: unexpected error: %v", test.description, err) } defer req.Body.Close() - if err := runtime.DecodeInto(codec, data, new_node); err != nil { + if err := runtime.DecodeInto(codec, data, newNode); err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } - if !equalTaints(expectNewNode.Spec.Taints, new_node.Spec.Taints) { - t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, expectNewNode.Spec.Taints, new_node.Spec.Taints) + if !equalTaints(expectNewNode.Spec.Taints, newNode.Spec.Taints) { + t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, expectNewNode.Spec.Taints, newNode.Spec.Taints) } - return 
&http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, new_node)}, nil + return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, newNode)}, nil default: t.Fatalf("%s: unexpected request: %v %#v\n%#v", test.description, req.Method, req.URL, req) return nil, nil @@ -305,7 +305,7 @@ func TestTaint(t *testing.T) { cmd := NewCmdTaint(tf, genericclioptions.NewTestIOStreamsDiscard()) - saw_fatal := false + sawFatal := false func() { defer func() { // Recover from the panic below. @@ -316,13 +316,13 @@ func TestTaint(t *testing.T) { // Restore cmdutil behavior cmdutil.DefaultBehaviorOnFatal() }() - cmdutil.BehaviorOnFatal(func(e string, code int) { saw_fatal = true; panic(e) }) + cmdutil.BehaviorOnFatal(func(e string, code int) { sawFatal = true; panic(e) }) cmd.SetArgs(test.args) cmd.Execute() }() if test.expectFatal { - if !saw_fatal { + if !sawFatal { t.Fatalf("%s: unexpected non-error", test.description) } } diff --git a/pkg/kubectl/cmd/testing/fake.go b/pkg/kubectl/cmd/testing/fake.go index 4268b1ecacb..30f1d7a54ed 100644 --- a/pkg/kubectl/cmd/testing/fake.go +++ b/pkg/kubectl/cmd/testing/fake.go @@ -51,6 +51,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/validation" ) +// InternalType is the schema for internal type // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type InternalType struct { @@ -60,6 +61,7 @@ type InternalType struct { Name string } +// ExternalType is the schema for external type // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ExternalType struct { @@ -69,6 +71,7 @@ type ExternalType struct { Name string `json:"name"` } +// ExternalType2 is another schema for external type // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ExternalType2 struct { @@ -78,28 +81,46 @@ type ExternalType2 struct { Name string 
`json:"name"` } +// GetObjectKind returns the ObjectKind schema func (obj *InternalType) GetObjectKind() schema.ObjectKind { return obj } + +// SetGroupVersionKind sets the version and kind func (obj *InternalType) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } + +// GroupVersionKind returns GroupVersionKind schema func (obj *InternalType) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } + +// GetObjectKind returns the ObjectKind schema func (obj *ExternalType) GetObjectKind() schema.ObjectKind { return obj } + +// SetGroupVersionKind returns the GroupVersionKind schema func (obj *ExternalType) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } + +// GroupVersionKind returns the GroupVersionKind schema func (obj *ExternalType) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } + +// GetObjectKind returns the ObjectKind schema func (obj *ExternalType2) GetObjectKind() schema.ObjectKind { return obj } + +// SetGroupVersionKind sets the API version and obj kind from schema func (obj *ExternalType2) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } + +// GroupVersionKind returns the FromAPIVersionAndKind schema func (obj *ExternalType2) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } +// NewInternalType returns an initialized InternalType instance func NewInternalType(kind, apiversion, name string) *InternalType { item := InternalType{Kind: kind, APIVersion: apiversion, @@ -107,6 +128,7 @@ func NewInternalType(kind, apiversion, name string) *InternalType { return &item } +// InternalNamespacedType schema for internal namespaced types // +k8s:deepcopy-gen=true // 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type InternalNamespacedType struct { @@ -117,6 +139,7 @@ type InternalNamespacedType struct { Namespace string } +// ExternalNamespacedType schema for external namespaced types // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ExternalNamespacedType struct { @@ -127,6 +150,7 @@ type ExternalNamespacedType struct { Namespace string `json:"namespace"` } +// ExternalNamespacedType2 schema for external namespaced types // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ExternalNamespacedType2 struct { @@ -137,28 +161,46 @@ type ExternalNamespacedType2 struct { Namespace string `json:"namespace"` } +// GetObjectKind returns the ObjectKind schema func (obj *InternalNamespacedType) GetObjectKind() schema.ObjectKind { return obj } + +// SetGroupVersionKind sets the API group and kind from schema func (obj *InternalNamespacedType) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } + +// GroupVersionKind returns the GroupVersionKind schema func (obj *InternalNamespacedType) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } + +// GetObjectKind returns the ObjectKind schema func (obj *ExternalNamespacedType) GetObjectKind() schema.ObjectKind { return obj } + +// SetGroupVersionKind sets the API version and kind from schema func (obj *ExternalNamespacedType) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } + +// GroupVersionKind returns the GroupVersionKind schema func (obj *ExternalNamespacedType) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } + +// GetObjectKind returns the ObjectKind schema func (obj *ExternalNamespacedType2) GetObjectKind() schema.ObjectKind { return obj } + 
+// SetGroupVersionKind sets the API version and kind from schema func (obj *ExternalNamespacedType2) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } + +// GroupVersionKind returns the GroupVersionKind schema func (obj *ExternalNamespacedType2) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } +// NewInternalNamespacedType returns an initialized instance of InternalNamespacedType func NewInternalNamespacedType(kind, apiversion, name, namespace string) *InternalNamespacedType { item := InternalNamespacedType{Kind: kind, APIVersion: apiversion, @@ -167,26 +209,35 @@ func NewInternalNamespacedType(kind, apiversion, name, namespace string) *Intern return &item } -var versionErr = errors.New("not a version") +var errInvalidVersion = errors.New("not a version") func versionErrIfFalse(b bool) error { if b { return nil } - return versionErr + return errInvalidVersion } +// ValidVersion of API var ValidVersion = "v1" + +// InternalGV is the internal group version object var InternalGV = schema.GroupVersion{Group: "apitest", Version: runtime.APIVersionInternal} + +// UnlikelyGV is a group version object for unrecognised version var UnlikelyGV = schema.GroupVersion{Group: "apitest", Version: "unlikelyversion"} + +// ValidVersionGV is the valid group version object var ValidVersionGV = schema.GroupVersion{Group: "apitest", Version: ValidVersion} +// NewExternalScheme returns required objects for ExternalScheme func NewExternalScheme() (*runtime.Scheme, meta.RESTMapper, runtime.Codec) { scheme := runtime.NewScheme() mapper, codec := AddToScheme(scheme) return scheme, mapper, codec } +// AddToScheme adds required objects into scheme func AddToScheme(scheme *runtime.Scheme) (meta.RESTMapper, runtime.Codec) { scheme.AddKnownTypeWithName(InternalGV.WithKind("Type"), &InternalType{}) scheme.AddKnownTypeWithName(UnlikelyGV.WithKind("Type"), &ExternalType{}) @@ -228,6 
+279,7 @@ func (d *fakeCachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList return []*metav1.APIResourceList{}, nil } +// TestFactory extends cmdutil.Factory type TestFactory struct { cmdutil.Factory @@ -245,6 +297,7 @@ type TestFactory struct { OpenAPISchemaFunc func() (openapi.Resources, error) } +// NewTestFactory returns an initialized TestFactory instance func NewTestFactory() *TestFactory { // specify an optionalClientConfig to explicitly use in testing // to avoid polluting an existing user config. @@ -281,11 +334,13 @@ func NewTestFactory() *TestFactory { } } +// WithNamespace sets the namespace to be used by the TestFactory func (f *TestFactory) WithNamespace(ns string) *TestFactory { f.kubeConfigFlags.WithNamespace(ns) return f } +// Cleanup cleans up TestFactory temp config file func (f *TestFactory) Cleanup() { if f.tempConfigFile == nil { return @@ -294,14 +349,17 @@ func (f *TestFactory) Cleanup() { os.Remove(f.tempConfigFile.Name()) } +// ToRESTConfig is used to get ClientConfigVal from a TestFactory func (f *TestFactory) ToRESTConfig() (*restclient.Config, error) { return f.ClientConfigVal, nil } +// ClientForMapping is used to get a Client from a TestFactory func (f *TestFactory) ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { return f.Client, nil } +// UnstructuredClientForMapping is used to get UnstructuredClient from a TestFactory func (f *TestFactory) UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { if f.UnstructuredClientForMappingFunc != nil { return f.UnstructuredClientForMappingFunc(mapping.GroupVersionKind.GroupVersion()) @@ -309,10 +367,12 @@ func (f *TestFactory) UnstructuredClientForMapping(mapping *meta.RESTMapping) (r return f.UnstructuredClient, nil } +// Validator returns a validation schema func (f *TestFactory) Validator(validate bool) (validation.Schema, error) { return validation.NullSchema{}, nil } +// OpenAPISchema returns openapi resources func (f *TestFactory) 
OpenAPISchema() (openapi.Resources, error) { if f.OpenAPISchemaFunc != nil { return f.OpenAPISchemaFunc() @@ -320,6 +380,7 @@ func (f *TestFactory) OpenAPISchema() (openapi.Resources, error) { return openapitesting.EmptyResources{}, nil } +// NewBuilder returns an initialized resource.Builder instance func (f *TestFactory) NewBuilder() *resource.Builder { return resource.NewFakeBuilder( func(version schema.GroupVersion) (resource.RESTClient, error) { @@ -338,6 +399,7 @@ func (f *TestFactory) NewBuilder() *resource.Builder { ) } +// KubernetesClientSet initializes and returns the Clientset using TestFactory func (f *TestFactory) KubernetesClientSet() (*kubernetes.Clientset, error) { fakeClient := f.Client.(*fake.RESTClient) clientset := kubernetes.NewForConfigOrDie(f.ClientConfigVal) @@ -365,6 +427,7 @@ func (f *TestFactory) KubernetesClientSet() (*kubernetes.Clientset, error) { return clientset, nil } +// DynamicClient returns a dynamic client from TestFactory func (f *TestFactory) DynamicClient() (dynamic.Interface, error) { if f.FakeDynamicClient != nil { return f.FakeDynamicClient, nil @@ -372,6 +435,7 @@ func (f *TestFactory) DynamicClient() (dynamic.Interface, error) { return f.Factory.DynamicClient() } +// RESTClient returns a REST client from TestFactory func (f *TestFactory) RESTClient() (*restclient.RESTClient, error) { // Swap out the HTTP client out of the client with the fake's version. 
fakeClient := f.Client.(*fake.RESTClient) @@ -383,6 +447,7 @@ func (f *TestFactory) RESTClient() (*restclient.RESTClient, error) { return restClient, nil } +// DiscoveryClient returns a discovery client from TestFactory func (f *TestFactory) DiscoveryClient() (discovery.CachedDiscoveryInterface, error) { fakeClient := f.Client.(*fake.RESTClient) @@ -413,6 +478,7 @@ func testRESTMapper() meta.RESTMapper { return expander } +// ScaleClient returns the ScalesGetter from a TestFactory func (f *TestFactory) ScaleClient() (scaleclient.ScalesGetter, error) { return f.ScaleGetter, nil } diff --git a/pkg/kubectl/cmd/top/top_node_test.go b/pkg/kubectl/cmd/top/top_node_test.go index b610d201a88..d350489a509 100644 --- a/pkg/kubectl/cmd/top/top_node_test.go +++ b/pkg/kubectl/cmd/top/top_node_test.go @@ -46,7 +46,7 @@ const ( func TestTopNodeAllMetrics(t *testing.T) { cmdtesting.InitTestErrorHandler(t) metrics, nodes := testNodeV1alpha1MetricsData() - expectedMetricsPath := fmt.Sprintf("%s/%s/nodes", baseMetricsAddress, metricsApiVersion) + expectedMetricsPath := fmt.Sprintf("%s/%s/nodes", baseMetricsAddress, metricsAPIVersion) expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion) tf := cmdtesting.NewTestFactory().WithNamespace("test") @@ -102,7 +102,7 @@ func TestTopNodeAllMetricsCustomDefaults(t *testing.T) { cmdtesting.InitTestErrorHandler(t) metrics, nodes := testNodeV1alpha1MetricsData() - expectedMetricsPath := fmt.Sprintf("%s/%s/nodes", customBaseMetricsAddress, metricsApiVersion) + expectedMetricsPath := fmt.Sprintf("%s/%s/nodes", customBaseMetricsAddress, metricsAPIVersion) expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion) tf := cmdtesting.NewTestFactory().WithNamespace("test") @@ -165,7 +165,7 @@ func TestTopNodeWithNameMetrics(t *testing.T) { ListMeta: metrics.ListMeta, Items: metrics.Items[1:], } - expectedPath := fmt.Sprintf("%s/%s/nodes/%s", baseMetricsAddress, metricsApiVersion, expectedMetrics.Name) + expectedPath := 
fmt.Sprintf("%s/%s/nodes/%s", baseMetricsAddress, metricsAPIVersion, expectedMetrics.Name) expectedNodePath := fmt.Sprintf("/%s/%s/nodes/%s", apiPrefix, apiVersion, expectedMetrics.Name) tf := cmdtesting.NewTestFactory().WithNamespace("test") @@ -230,7 +230,7 @@ func TestTopNodeWithLabelSelectorMetrics(t *testing.T) { Items: metrics.Items[1:], } label := "key=value" - expectedPath := fmt.Sprintf("%s/%s/nodes", baseMetricsAddress, metricsApiVersion) + expectedPath := fmt.Sprintf("%s/%s/nodes", baseMetricsAddress, metricsAPIVersion) expectedQuery := fmt.Sprintf("labelSelector=%s", url.QueryEscape(label)) expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion) diff --git a/pkg/kubectl/cmd/top/top_pod_test.go b/pkg/kubectl/cmd/top/top_pod_test.go index f0ab8b0010f..1d28b4c6a5d 100644 --- a/pkg/kubectl/cmd/top/top_pod_test.go +++ b/pkg/kubectl/cmd/top/top_pod_test.go @@ -45,7 +45,7 @@ import ( ) const ( - topPathPrefix = baseMetricsAddress + "/" + metricsApiVersion + topPathPrefix = baseMetricsAddress + "/" + metricsAPIVersion topMetricsAPIPathPrefix = "/apis/metrics.k8s.io/v1beta1" apibody = `{ "kind": "APIVersions", @@ -440,7 +440,7 @@ func (d *fakeDiscovery) RESTClient() restclient.Interface { func TestTopPodCustomDefaults(t *testing.T) { customBaseHeapsterServiceAddress := "/api/v1/namespaces/custom-namespace/services/https:custom-heapster-service:/proxy" customBaseMetricsAddress := customBaseHeapsterServiceAddress + "/apis/metrics" - customTopPathPrefix := customBaseMetricsAddress + "/" + metricsApiVersion + customTopPathPrefix := customBaseMetricsAddress + "/" + metricsAPIVersion testNS := "custom-namespace" testCases := []struct { diff --git a/pkg/kubectl/cmd/top/top_test.go b/pkg/kubectl/cmd/top/top_test.go index 834dd36004d..eaa6856444c 100644 --- a/pkg/kubectl/cmd/top/top_test.go +++ b/pkg/kubectl/cmd/top/top_test.go @@ -38,7 +38,7 @@ const ( baseHeapsterServiceAddress = "/api/v1/namespaces/kube-system/services/http:heapster:/proxy" 
baseMetricsAddress = baseHeapsterServiceAddress + "/apis/metrics" baseMetricsServerAddress = "/apis/metrics.k8s.io/v1beta1" - metricsApiVersion = "v1alpha1" + metricsAPIVersion = "v1alpha1" ) func TestTopSubcommandsExist(t *testing.T) { diff --git a/pkg/kubectl/cmd/util/editor/editoptions.go b/pkg/kubectl/cmd/util/editor/editoptions.go index 2b2add45859..0293df42729 100644 --- a/pkg/kubectl/cmd/util/editor/editoptions.go +++ b/pkg/kubectl/cmd/util/editor/editoptions.go @@ -81,6 +81,7 @@ type EditOptions struct { updatedResultGetter func(data []byte) *resource.Result } +// NewEditOptions returns an initialized EditOptions instance func NewEditOptions(editMode EditMode, ioStreams genericclioptions.IOStreams) *EditOptions { return &EditOptions{ RecordFlags: genericclioptions.NewRecordFlags(), @@ -216,6 +217,7 @@ func (o *EditOptions) Validate() error { return nil } +// Run performs the execution func (o *EditOptions) Run() error { edit := NewDefaultEditor(editorEnvs()) // editFn is invoked for each edit session (once with a list for normal edit, once for each individual resource in a edit-on-create invocation) @@ -399,7 +401,7 @@ func (o *EditOptions) Run() error { return err } if len(infos) == 0 { - return errors.New("edit cancelled, no objects found.") + return errors.New("edit cancelled, no objects found") } return editFn(infos) case ApplyEditMode: @@ -459,12 +461,12 @@ func (o *EditOptions) visitToApplyEditPatch(originalInfos []*resource.Info, patc return fmt.Errorf("no original object found for %#v", info.Object) } - originalJS, err := encodeToJson(originalInfo.Object.(runtime.Unstructured)) + originalJS, err := encodeToJSON(originalInfo.Object.(runtime.Unstructured)) if err != nil { return err } - editedJS, err := encodeToJson(info.Object.(runtime.Unstructured)) + editedJS, err := encodeToJSON(info.Object.(runtime.Unstructured)) if err != nil { return err } @@ -474,21 +476,18 @@ func (o *EditOptions) visitToApplyEditPatch(originalInfos []*resource.Info, patc if 
err != nil { return err } - printer.PrintObj(info.Object, o.Out) - return nil - } else { - err := o.annotationPatch(info) - if err != nil { - return err - } - - printer, err := o.ToPrinter("edited") - if err != nil { - return err - } - printer.PrintObj(info.Object, o.Out) - return nil + return printer.PrintObj(info.Object, o.Out) } + err = o.annotationPatch(info) + if err != nil { + return err + } + + printer, err := o.ToPrinter("edited") + if err != nil { + return err + } + return printer.PrintObj(info.Object, o.Out) }) return err } @@ -511,8 +510,9 @@ func (o *EditOptions) annotationPatch(update *resource.Info) error { return nil } +// GetApplyPatch is used to get and apply patches func GetApplyPatch(obj runtime.Unstructured) ([]byte, []byte, types.PatchType, error) { - beforeJSON, err := encodeToJson(obj) + beforeJSON, err := encodeToJSON(obj) if err != nil { return nil, []byte(""), types.MergePatchType, err } @@ -527,7 +527,7 @@ func GetApplyPatch(obj runtime.Unstructured) ([]byte, []byte, types.PatchType, e } annotations[corev1.LastAppliedConfigAnnotation] = string(beforeJSON) accessor.SetAnnotations(objCopy, annotations) - afterJSON, err := encodeToJson(objCopy.(runtime.Unstructured)) + afterJSON, err := encodeToJSON(objCopy.(runtime.Unstructured)) if err != nil { return nil, beforeJSON, types.MergePatchType, err } @@ -535,7 +535,7 @@ func GetApplyPatch(obj runtime.Unstructured) ([]byte, []byte, types.PatchType, e return patch, beforeJSON, types.MergePatchType, err } -func encodeToJson(obj runtime.Unstructured) ([]byte, error) { +func encodeToJSON(obj runtime.Unstructured) ([]byte, error) { serialization, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { return nil, err @@ -569,12 +569,12 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor return fmt.Errorf("no original object found for %#v", info.Object) } - originalJS, err := encodeToJson(originalInfo.Object.(runtime.Unstructured)) + originalJS, 
err := encodeToJSON(originalInfo.Object.(runtime.Unstructured)) if err != nil { return err } - editedJS, err := encodeToJson(info.Object.(runtime.Unstructured)) + editedJS, err := encodeToJSON(info.Object.(runtime.Unstructured)) if err != nil { return err } @@ -585,8 +585,7 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor if err != nil { return err } - printer.PrintObj(info.Object, o.Out) - return nil + return printer.PrintObj(info.Object, o.Out) } preconditions := []mergepatch.PreconditionFunc{ @@ -643,8 +642,7 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor if err != nil { return err } - printer.PrintObj(info.Object, o.Out) - return nil + return printer.PrintObj(info.Object, o.Out) }) return err } @@ -658,8 +656,7 @@ func (o *EditOptions) visitToCreate(createVisitor resource.Visitor) error { if err != nil { return err } - printer.PrintObj(info.Object, o.Out) - return nil + return printer.PrintObj(info.Object, o.Out) }) return err } @@ -683,12 +680,18 @@ func (o *EditOptions) visitAnnotation(annotationVisitor resource.Visitor) error return err } +// EditMode can be either NormalEditMode, EditBeforeCreateMode or ApplyEditMode type EditMode string const ( - NormalEditMode EditMode = "normal_mode" + // NormalEditMode is an edit mode + NormalEditMode EditMode = "normal_mode" + + // EditBeforeCreateMode is an edit mode EditBeforeCreateMode EditMode = "edit_before_create_mode" - ApplyEditMode EditMode = "edit_last_applied_mode" + + // ApplyEditMode is an edit mode + ApplyEditMode EditMode = "edit_last_applied_mode" ) // editReason preserves a message about the reason this file must be edited again diff --git a/pkg/kubectl/cmd/util/editor/editor.go b/pkg/kubectl/cmd/util/editor/editor.go index e7229870ec7..ef7831ca110 100644 --- a/pkg/kubectl/cmd/util/editor/editor.go +++ b/pkg/kubectl/cmd/util/editor/editor.go @@ -43,6 +43,7 @@ const ( windowsShell = "cmd" ) +// Editor holds the command-line args to 
fire up the editor type Editor struct { Args []string Shell bool diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index 9a392cde59b..9d661cb3abb 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -290,7 +290,7 @@ func messageForError(err error) string { func UsageErrorf(cmd *cobra.Command, format string, args ...interface{}) error { msg := fmt.Sprintf(format, args...) - return fmt.Errorf("%s\nSee '%s -h' for help and examples.", msg, cmd.CommandPath()) + return fmt.Errorf("%s\nSee '%s -h' for help and examples", msg, cmd.CommandPath()) } func IsFilenameSliceEmpty(filenames []string) bool { diff --git a/pkg/kubectl/cmd/util/kubectl_match_version.go b/pkg/kubectl/cmd/util/kubectl_match_version.go index 99ba2baeec6..675f8e1d91d 100644 --- a/pkg/kubectl/cmd/util/kubectl_match_version.go +++ b/pkg/kubectl/cmd/util/kubectl_match_version.go @@ -92,7 +92,7 @@ func (f *MatchVersionFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterf return f.Delegate.ToDiscoveryClient() } -// RESTMapper returns a mapper. +// ToRESTMapper returns a mapper. 
func (f *MatchVersionFlags) ToRESTMapper() (meta.RESTMapper, error) { if err := f.checkMatchingServerVersion(); err != nil { return nil, err diff --git a/pkg/kubectl/cmd/util/printing.go b/pkg/kubectl/cmd/util/printing.go index 75b73c62048..f093940d8a4 100644 --- a/pkg/kubectl/cmd/util/printing.go +++ b/pkg/kubectl/cmd/util/printing.go @@ -22,8 +22,8 @@ import ( "k8s.io/kubernetes/pkg/kubectl/util/templates" ) -// SuggestApiResources returns a suggestion to use the "api-resources" command +// SuggestAPIResources returns a suggestion to use the "api-resources" command // to retrieve a supported list of resources -func SuggestApiResources(parent string) string { +func SuggestAPIResources(parent string) string { return templates.LongDesc(fmt.Sprintf("Use \"%s api-resources\" for a complete list of supported resources.", parent)) } diff --git a/pkg/kubectl/cmd/util/sanity/cmd_sanity.go b/pkg/kubectl/cmd/util/sanity/cmd_sanity.go index 21d2a291d29..4ec078d1e12 100644 --- a/pkg/kubectl/cmd/util/sanity/cmd_sanity.go +++ b/pkg/kubectl/cmd/util/sanity/cmd_sanity.go @@ -28,20 +28,27 @@ import ( "k8s.io/kubernetes/pkg/kubectl/util/templates" ) +// CmdCheck is the commom type of functions to check cobra commands type CmdCheck func(cmd *cobra.Command) []error + +// GlobalCheck is the common type of functions to check global flags type GlobalCheck func() []error var ( + // AllCmdChecks is the list of CmdCheck type functions AllCmdChecks = []CmdCheck{ CheckLongDesc, CheckExamples, CheckFlags, } + + // AllGlobalChecks is the list of GlobalCheck type functions AllGlobalChecks = []GlobalCheck{ CheckGlobalVarFlags, } ) +// RunGlobalChecks runs all the GlobalCheck functions passed and checks for error func RunGlobalChecks(globalChecks []GlobalCheck) []error { fmt.Fprint(os.Stdout, "---+ RUNNING GLOBAL CHECKS\n") errors := []error{} @@ -51,6 +58,7 @@ func RunGlobalChecks(globalChecks []GlobalCheck) []error { return errors } +// RunCmdChecks runs all the CmdCheck functions passed, 
skipping skippable commands and looks for error func RunCmdChecks(cmd *cobra.Command, cmdChecks []CmdCheck, skipCmd []string) []error { cmdPath := cmd.CommandPath() @@ -80,6 +88,7 @@ func RunCmdChecks(cmd *cobra.Command, cmdChecks []CmdCheck, skipCmd []string) [] return errors } +// CheckLongDesc checks if the long description is valid func CheckLongDesc(cmd *cobra.Command) []error { fmt.Fprint(os.Stdout, " ↳ checking long description\n") cmdPath := cmd.CommandPath() @@ -92,6 +101,7 @@ func CheckLongDesc(cmd *cobra.Command) []error { return nil } +// CheckExamples checks if the command examples are valid func CheckExamples(cmd *cobra.Command) []error { fmt.Fprint(os.Stdout, " ↳ checking examples\n") cmdPath := cmd.CommandPath() @@ -110,6 +120,7 @@ func CheckExamples(cmd *cobra.Command) []error { return errors } +// CheckFlags checks if the command-line flags are valid func CheckFlags(cmd *cobra.Command) []error { allFlagsSlice := []*pflag.Flag{} @@ -140,6 +151,7 @@ func CheckFlags(cmd *cobra.Command) []error { return errors } +// CheckGlobalVarFlags checks if the global flags are valid func CheckGlobalVarFlags() []error { fmt.Fprint(os.Stdout, " ↳ checking flags from global vars\n") errors := []error{} diff --git a/pkg/kubectl/cmd/wait/wait.go b/pkg/kubectl/cmd/wait/wait.go index 6a1e075e816..481d4661fb4 100644 --- a/pkg/kubectl/cmd/wait/wait.go +++ b/pkg/kubectl/cmd/wait/wait.go @@ -44,7 +44,7 @@ import ( ) var ( - wait_long = templates.LongDesc(` + waitLong = templates.LongDesc(` Experimental: Wait for a specific condition on one or many resources. The command takes multiple resources and waits until the specified condition @@ -56,7 +56,7 @@ var ( A successful message will be printed to stdout indicating when the specified condition has been met. One can use -o option to change to output destination.`) - wait_example = templates.Examples(` + waitExample = templates.Examples(` # Wait for the pod "busybox1" to contain the status condition of type "Ready". 
kubectl wait --for=condition=Ready pod/busybox1 @@ -106,8 +106,8 @@ func NewCmdWait(restClientGetter genericclioptions.RESTClientGetter, streams gen Use: "wait resource.group/name [--for=delete|--for condition=available]", DisableFlagsInUseLine: true, Short: "Experimental: Wait for a specific condition on one or many resources.", - Long: wait_long, - Example: wait_example, + Long: waitLong, + Example: waitExample, Run: func(cmd *cobra.Command, args []string) { o, err := flags.ToOptions(args) cmdutil.CheckErr(err) @@ -191,12 +191,14 @@ func conditionFuncFor(condition string, errOut io.Writer) (ConditionFunc, error) return nil, fmt.Errorf("unrecognized condition: %q", condition) } +// ResourceLocation holds the location of a resource type ResourceLocation struct { GroupResource schema.GroupResource Namespace string Name string } +// UIDMap maps ResourceLocation with UID type UIDMap map[ResourceLocation]types.UID // WaitOptions is a set of options that allows you to wait. This is the object reflects the runtime needs of a wait diff --git a/pkg/kubectl/describe/versioned/describe.go b/pkg/kubectl/describe/versioned/describe.go index 6f6dba6f0bc..8bc1b9cc8b4 100644 --- a/pkg/kubectl/describe/versioned/describe.go +++ b/pkg/kubectl/describe/versioned/describe.go @@ -881,8 +881,8 @@ func printProjectedVolumeSource(projected *corev1.ProjectedVolumeSource, w Prefi " ConfigMapOptional:\t%v\n", source.ConfigMap.Name, source.ConfigMap.Optional) } else if source.ServiceAccountToken != nil { - w.Write(LEVEL_2, "TokenExpirationSeconds:\t%v\n", - source.ServiceAccountToken.ExpirationSeconds) + w.Write(LEVEL_2, "TokenExpirationSeconds:\t%d\n", + *source.ServiceAccountToken.ExpirationSeconds) } } } @@ -1647,12 +1647,12 @@ func describeContainerVolumes(container corev1.Container, w PrefixWriter) { sort.Sort(SortableVolumeMounts(container.VolumeMounts)) for _, mount := range container.VolumeMounts { flags := []string{} - switch { - case mount.ReadOnly: + if mount.ReadOnly { flags = 
append(flags, "ro") - case !mount.ReadOnly: + } else { flags = append(flags, "rw") - case len(mount.SubPath) > 0: + } + if len(mount.SubPath) > 0 { flags = append(flags, fmt.Sprintf("path=%q", mount.SubPath)) } w.Write(LEVEL_3, "%s from %s (%s)\n", mount.MountPath, mount.Name, strings.Join(flags, ",")) @@ -2073,6 +2073,16 @@ func describeCronJob(cronJob *batchv1beta1.CronJob, events *corev1.EventList) (s w.Write(LEVEL_0, "Schedule:\t%s\n", cronJob.Spec.Schedule) w.Write(LEVEL_0, "Concurrency Policy:\t%s\n", cronJob.Spec.ConcurrencyPolicy) w.Write(LEVEL_0, "Suspend:\t%s\n", printBoolPtr(cronJob.Spec.Suspend)) + if cronJob.Spec.SuccessfulJobsHistoryLimit != nil { + w.Write(LEVEL_0, "Successful Job History Limit:\t%d\n", cronJob.Spec.SuccessfulJobsHistoryLimit) + } else { + w.Write(LEVEL_0, "Successful Job History Limit:\t\n") + } + if cronJob.Spec.FailedJobsHistoryLimit != nil { + w.Write(LEVEL_0, "Failed Job History Limit:\t%d\n", *cronJob.Spec.FailedJobsHistoryLimit) + } else { + w.Write(LEVEL_0, "Failed Job History Limit:\t\n") + } if cronJob.Spec.StartingDeadlineSeconds != nil { w.Write(LEVEL_0, "Starting Deadline Seconds:\t%ds\n", *cronJob.Spec.StartingDeadlineSeconds) } else { diff --git a/pkg/kubectl/describe/versioned/describe_test.go b/pkg/kubectl/describe/versioned/describe_test.go index fff37f189eb..1f7689df14c 100644 --- a/pkg/kubectl/describe/versioned/describe_test.go +++ b/pkg/kubectl/describe/versioned/describe_test.go @@ -718,6 +718,22 @@ func TestDescribeContainers(t *testing.T) { expectedElements: []string{"Mounts", "mounted-volume", "/opt/", "(ro)"}, }, + // volumeMounts subPath + { + container: corev1.Container{ + Name: "test", + Image: "image", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "mounted-volume", + MountPath: "/opt/", + SubPath: "foo", + }, + }, + }, + expectedElements: []string{"Mounts", "mounted-volume", "/opt/", "(rw,path=\"foo\")"}, + }, + // volumeDevices { container: corev1.Container{ diff --git a/pkg/kubelet/BUILD 
b/pkg/kubelet/BUILD index fd5e6391c51..70f3382d5fd 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -166,7 +166,6 @@ go_test( "kubelet_resources_test.go", "kubelet_test.go", "kubelet_volumes_test.go", - "main_test.go", "oom_watcher_test.go", "pod_container_deletor_test.go", "pod_workers_test.go", @@ -209,7 +208,7 @@ go_test( "//pkg/kubelet/util/sliceutils:go_default_library", "//pkg/kubelet/volumemanager:go_default_library", "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/taints:go_default_library", "//pkg/version:go_default_library", diff --git a/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto b/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto index 8819607692d..28fb45816dc 100644 --- a/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto +++ b/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto @@ -1,3 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + // To regenerate api.pb.go run hack/update-generated-runtime.sh syntax = 'proto3'; diff --git a/pkg/kubelet/apis/stats/v1alpha1/types.go b/pkg/kubelet/apis/stats/v1alpha1/types.go index ecffb8bc344..1a09fe4ffcc 100644 --- a/pkg/kubelet/apis/stats/v1alpha1/types.go +++ b/pkg/kubelet/apis/stats/v1alpha1/types.go @@ -324,7 +324,7 @@ type UserDefinedMetricDescriptor struct { Labels map[string]string `json:"labels,omitempty"` } -// UserDefinedMetric represents a metric defined and generate by users. +// UserDefinedMetric represents a metric defined and generated by users. type UserDefinedMetric struct { UserDefinedMetricDescriptor `json:",inline"` // The time at which these stats were updated. diff --git a/pkg/kubelet/cadvisor/BUILD b/pkg/kubelet/cadvisor/BUILD index d3370f13cc8..f8421a3977c 100644 --- a/pkg/kubelet/cadvisor/BUILD +++ b/pkg/kubelet/cadvisor/BUILD @@ -50,19 +50,17 @@ go_test( name = "go_default_test", srcs = [ "cadvisor_linux_test.go", - "main_test.go", "util_test.go", ], embed = [":go_default_library"], - deps = [ - "//pkg/features:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", - ] + select({ + deps = select({ "@io_bazel_rules_go//go/platform:linux": [ "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/features:go_default_library", "//pkg/kubelet/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/metrics:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/cadvisor/cadvisor_linux.go b/pkg/kubelet/cadvisor/cadvisor_linux.go index d15907fe869..0f02a811426 100644 
--- a/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -100,7 +100,6 @@ func containerLabels(c *cadvisorapi.ContainerInfo) map[string]string { return set } -// New creates a cAdvisor and exports its API on the specified port if port > 0. func New(imageFsInfoProvider ImageFsInfoProvider, rootPath string, usingLegacyStats bool) (Interface, error) { sysFs := sysfs.NewRealSysFs() diff --git a/pkg/kubelet/cadvisor/main_test.go b/pkg/kubelet/cadvisor/main_test.go deleted file mode 100644 index f13d029c393..00000000000 --- a/pkg/kubelet/cadvisor/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cadvisor - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/kubelet/certificate/bootstrap/bootstrap.go b/pkg/kubelet/certificate/bootstrap/bootstrap.go index efe34ef1510..46491a45cb2 100644 --- a/pkg/kubelet/certificate/bootstrap/bootstrap.go +++ b/pkg/kubelet/certificate/bootstrap/bootstrap.go @@ -47,13 +47,65 @@ import ( const tmpPrivateKeyFile = "kubelet-client.key.tmp" +// LoadClientConfig tries to load the appropriate client config for retrieving certs and for use by users. 
+// If bootstrapPath is empty, only kubeconfigPath is checked. If bootstrap path is set and the contents +// of kubeconfigPath are valid, both certConfig and userConfig will point to that file. Otherwise the +// kubeconfigPath on disk is populated based on bootstrapPath but pointing to the location of the client cert +// in certDir. This preserves the historical behavior of bootstrapping where on subsequent restarts the +// most recent client cert is used to request new client certs instead of the initial token. +func LoadClientConfig(kubeconfigPath, bootstrapPath, certDir string) (certConfig, userConfig *restclient.Config, err error) { + if len(bootstrapPath) == 0 { + clientConfig, err := loadRESTClientConfig(kubeconfigPath) + if err != nil { + return nil, nil, fmt.Errorf("unable to load kubeconfig: %v", err) + } + klog.V(2).Infof("No bootstrapping requested, will use kubeconfig") + return clientConfig, restclient.CopyConfig(clientConfig), nil + } + + store, err := certificate.NewFileStore("kubelet-client", certDir, certDir, "", "") + if err != nil { + return nil, nil, fmt.Errorf("unable to build bootstrap cert store") + } + + ok, err := isClientConfigStillValid(kubeconfigPath) + if err != nil { + return nil, nil, err + } + + // use the current client config + if ok { + clientConfig, err := loadRESTClientConfig(kubeconfigPath) + if err != nil { + return nil, nil, fmt.Errorf("unable to load kubeconfig: %v", err) + } + klog.V(2).Infof("Current kubeconfig file contents are still valid, no bootstrap necessary") + return clientConfig, restclient.CopyConfig(clientConfig), nil + } + + bootstrapClientConfig, err := loadRESTClientConfig(bootstrapPath) + if err != nil { + return nil, nil, fmt.Errorf("unable to load bootstrap kubeconfig: %v", err) + } + + clientConfig := restclient.AnonymousClientConfig(bootstrapClientConfig) + pemPath := store.CurrentPath() + clientConfig.KeyFile = pemPath + clientConfig.CertFile = pemPath + if err := 
writeKubeconfigFromBootstrapping(clientConfig, kubeconfigPath, pemPath); err != nil { + return nil, nil, err + } + klog.V(2).Infof("Use the bootstrap credentials to request a cert, and set kubeconfig to point to the certificate dir") + return bootstrapClientConfig, clientConfig, nil +} + // LoadClientCert requests a client cert for kubelet if the kubeconfigPath file does not exist. // The kubeconfig at bootstrapPath is used to request a client certificate from the API server. // On success, a kubeconfig file referencing the generated key and obtained certificate is written to kubeconfigPath. // The certificate and key file are stored in certDir. -func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, nodeName types.NodeName) error { +func LoadClientCert(kubeconfigPath, bootstrapPath, certDir string, nodeName types.NodeName) error { // Short-circuit if the kubeconfig file exists and is valid. - ok, err := verifyBootstrapClientConfig(kubeconfigPath) + ok, err := isClientConfigStillValid(kubeconfigPath) if err != nil { return err } @@ -117,8 +169,10 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, klog.V(2).Infof("failed cleaning up private key file %q: %v", privKeyPath, err) } - pemPath := store.CurrentPath() + return writeKubeconfigFromBootstrapping(bootstrapClientConfig, kubeconfigPath, store.CurrentPath()) +} +func writeKubeconfigFromBootstrapping(bootstrapClientConfig *restclient.Config, kubeconfigPath, pemPath string) error { // Get the CA data from the bootstrap client config. caFile, caData := bootstrapClientConfig.CAFile, []byte{} if len(caFile) == 0 { @@ -168,10 +222,10 @@ func loadRESTClientConfig(kubeconfig string) (*restclient.Config, error) { ).ClientConfig() } -// verifyBootstrapClientConfig checks the provided kubeconfig to see if it has a valid +// isClientConfigStillValid checks the provided kubeconfig to see if it has a valid // client certificate. 
It returns true if the kubeconfig is valid, or an error if bootstrapping // should stop immediately. -func verifyBootstrapClientConfig(kubeconfigPath string) (bool, error) { +func isClientConfigStillValid(kubeconfigPath string) (bool, error) { _, err := os.Stat(kubeconfigPath) if os.IsNotExist(err) { return false, nil diff --git a/pkg/kubelet/certificate/kubelet.go b/pkg/kubelet/certificate/kubelet.go index e55594f8e84..cf106c84e10 100644 --- a/pkg/kubelet/certificate/kubelet.go +++ b/pkg/kubelet/certificate/kubelet.go @@ -17,6 +17,7 @@ limitations under the License. package certificate import ( + "crypto/tls" "crypto/x509" "crypto/x509/pkix" "fmt" @@ -29,7 +30,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" - clientcertificates "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" "k8s.io/client-go/util/certificate" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/pkg/kubelet/metrics" @@ -38,7 +39,7 @@ import ( // NewKubeletServerCertificateManager creates a certificate manager for the kubelet when retrieving a server certificate // or returns an error. 
func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg *kubeletconfig.KubeletConfiguration, nodeName types.NodeName, getAddresses func() []v1.NodeAddress, certDirectory string) (certificate.Manager, error) { - var certSigningRequestClient clientcertificates.CertificateSigningRequestInterface + var certSigningRequestClient certificatesclient.CertificateSigningRequestInterface if kubeClient != nil && kubeClient.CertificatesV1beta1() != nil { certSigningRequestClient = kubeClient.CertificatesV1beta1().CertificateSigningRequests() } @@ -78,8 +79,10 @@ func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg } m, err := certificate.NewManager(&certificate.Config{ - CertificateSigningRequestClient: certSigningRequestClient, - GetTemplate: getTemplate, + ClientFn: func(current *tls.Certificate) (certificatesclient.CertificateSigningRequestInterface, error) { + return certSigningRequestClient, nil + }, + GetTemplate: getTemplate, Usages: []certificates.KeyUsage{ // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // @@ -142,10 +145,18 @@ func addressesToHostnamesAndIPs(addresses []v1.NodeAddress) (dnsNames []string, } // NewKubeletClientCertificateManager sets up a certificate manager without a -// client that can be used to sign new certificates (or rotate). It answers with -// whatever certificate it is initialized with. If a CSR client is set later, it -// may begin rotating/renewing the client cert -func NewKubeletClientCertificateManager(certDirectory string, nodeName types.NodeName, certData []byte, keyData []byte, certFile string, keyFile string) (certificate.Manager, error) { +// client that can be used to sign new certificates (or rotate). If a CSR +// client is set later, it may begin rotating/renewing the client cert. 
+func NewKubeletClientCertificateManager( + certDirectory string, + nodeName types.NodeName, + bootstrapCertData []byte, + bootstrapKeyData []byte, + certFile string, + keyFile string, + clientFn certificate.CSRClientFunc, +) (certificate.Manager, error) { + certificateStore, err := certificate.NewFileStore( "kubelet-client", certDirectory, @@ -163,9 +174,10 @@ func NewKubeletClientCertificateManager(certDirectory string, nodeName types.Nod Help: "Gauge of the lifetime of a certificate. The value is the date the certificate will expire in seconds since January 1, 1970 UTC.", }, ) - prometheus.MustRegister(certificateExpiration) + prometheus.Register(certificateExpiration) m, err := certificate.NewManager(&certificate.Config{ + ClientFn: clientFn, Template: &x509.CertificateRequest{ Subject: pkix.Name{ CommonName: fmt.Sprintf("system:node:%s", nodeName), @@ -187,10 +199,16 @@ func NewKubeletClientCertificateManager(certDirectory string, nodeName types.Nod // authenticate itself to the TLS server. certificates.UsageClientAuth, }, - CertificateStore: certificateStore, - BootstrapCertificatePEM: certData, - BootstrapKeyPEM: keyData, - CertificateExpiration: certificateExpiration, + + // For backwards compatibility, the kubelet supports the ability to + // provide a higher privileged certificate as initial data that will + // then be rotated immediately. This code path is used by kubeadm on + // the masters. 
+ BootstrapCertificatePEM: bootstrapCertData, + BootstrapKeyPEM: bootstrapKeyData, + + CertificateStore: certificateStore, + CertificateExpiration: certificateExpiration, }) if err != nil { return nil, fmt.Errorf("failed to initialize client certificate manager: %v", err) diff --git a/pkg/kubelet/certificate/transport_test.go b/pkg/kubelet/certificate/transport_test.go index ef8ea8c7291..78f26a49007 100644 --- a/pkg/kubelet/certificate/transport_test.go +++ b/pkg/kubelet/certificate/transport_test.go @@ -124,7 +124,9 @@ func (f *fakeManager) SetCertificateSigningRequestClient(certificatesclient.Cert func (f *fakeManager) ServerHealthy() bool { return f.healthy } -func (f *fakeManager) Start() {} +func (f *fakeManager) Start() {} +func (f *fakeManager) Stop() {} +func (f *fakeManager) RotateCerts() (bool, error) { return false, nil } func (f *fakeManager) Current() *tls.Certificate { if val := f.cert.Load(); val != nil { diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD index 6ead9ea8c3d..e6e44b4435a 100644 --- a/pkg/kubelet/cm/BUILD +++ b/pkg/kubelet/cm/BUILD @@ -35,7 +35,7 @@ go_library( "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/status:go_default_library", "//pkg/kubelet/util/pluginwatcher:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index a9fe926ee04..bb7b21c3983 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -30,7 +30,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo 
"k8s.io/kubernetes/pkg/scheduler/nodeinfo" "fmt" "strconv" @@ -90,7 +90,7 @@ type ContainerManager interface { // Otherwise, it updates allocatableResource in nodeInfo if necessary, // to make sure it is at least equal to the pod's requested capacity for // any registered device plugin resource - UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error + UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error InternalContainerLifecycle() InternalContainerLifecycle diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 9432fc71adc..ea0a5ad2151 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -55,7 +55,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" utilfile "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/oom" @@ -628,7 +628,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe return opts, nil } -func (cm *containerManagerImpl) UpdatePluginResources(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { +func (cm *containerManagerImpl) UpdatePluginResources(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { return cm.deviceManager.Allocate(node, attrs) } diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index 4563dc53048..c4b0924d994 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -29,7 +29,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" - schedulercache 
"k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) type containerManagerStub struct{} @@ -94,7 +94,7 @@ func (cm *containerManagerStub) GetResources(pod *v1.Pod, container *v1.Containe return &kubecontainer.RunContainerOptions{}, nil } -func (cm *containerManagerStub) UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error { +func (cm *containerManagerStub) UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error { return nil } diff --git a/pkg/kubelet/cm/container_manager_windows.go b/pkg/kubelet/cm/container_manager_windows.go index a8a84d5f22d..48ec1ce076b 100644 --- a/pkg/kubelet/cm/container_manager_windows.go +++ b/pkg/kubelet/cm/container_manager_windows.go @@ -39,7 +39,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/pkg/util/mount" ) @@ -156,7 +156,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe return &kubecontainer.RunContainerOptions{}, nil } -func (cm *containerManagerImpl) UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error { +func (cm *containerManagerImpl) UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error { return nil } diff --git a/pkg/kubelet/cm/devicemanager/BUILD b/pkg/kubelet/cm/devicemanager/BUILD index 885f783db79..f45f3fc9897 100644 --- a/pkg/kubelet/cm/devicemanager/BUILD +++ b/pkg/kubelet/cm/devicemanager/BUILD @@ -25,7 +25,7 @@ go_library( "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/util/pluginwatcher:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", 
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -47,7 +47,7 @@ go_test( "//pkg/kubelet/checkpointmanager:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/util/pluginwatcher:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go index 5434aa29f8e..5ae0405665c 100644 --- a/pkg/kubelet/cm/devicemanager/manager.go +++ b/pkg/kubelet/cm/devicemanager/manager.go @@ -41,7 +41,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/metrics" watcher "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // ActivePodsFunc is a function that returns a list of pods to reconcile. 
@@ -246,7 +246,7 @@ func (m *ManagerImpl) GetWatcherHandler() watcher.PluginHandler { } // ValidatePlugin validates a plugin if the version is correct and the name has the format of an extended resource -func (m *ManagerImpl) ValidatePlugin(pluginName string, endpoint string, versions []string) error { +func (m *ManagerImpl) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error { klog.V(2).Infof("Got Plugin %s at endpoint %s with versions %v", pluginName, endpoint, versions) if !m.isVersionCompatibleWithPlugin(versions) { @@ -263,7 +263,7 @@ func (m *ManagerImpl) ValidatePlugin(pluginName string, endpoint string, version // RegisterPlugin starts the endpoint and registers it // TODO: Start the endpoint and wait for the First ListAndWatch call // before registering the plugin -func (m *ManagerImpl) RegisterPlugin(pluginName string, endpoint string) error { +func (m *ManagerImpl) RegisterPlugin(pluginName string, endpoint string, versions []string) error { klog.V(2).Infof("Registering Plugin %s at endpoint %s", pluginName, endpoint) e, err := newEndpointImpl(endpoint, pluginName, m.callback) @@ -313,7 +313,7 @@ func (m *ManagerImpl) isVersionCompatibleWithPlugin(versions []string) bool { // Allocate is the call that you can use to allocate a set of devices // from the registered device plugins. -func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { +func (m *ManagerImpl) Allocate(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { pod := attrs.Pod devicesToReuse := make(map[string]sets.String) for _, container := range pod.Spec.InitContainers { @@ -769,8 +769,8 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s // and if necessary, updates allocatableResource in nodeInfo to at least equal to // the allocated capacity. 
This allows pods that have already been scheduled on // the node to pass GeneralPredicates admission checking even upon device plugin failure. -func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulercache.NodeInfo) { - var newAllocatableResource *schedulercache.Resource +func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulernodeinfo.NodeInfo) { + var newAllocatableResource *schedulernodeinfo.Resource allocatableResource := node.AllocatableResource() if allocatableResource.ScalarResources == nil { allocatableResource.ScalarResources = make(map[v1.ResourceName]int64) diff --git a/pkg/kubelet/cm/devicemanager/manager_stub.go b/pkg/kubelet/cm/devicemanager/manager_stub.go index e32b671ffb2..a4309c78a40 100644 --- a/pkg/kubelet/cm/devicemanager/manager_stub.go +++ b/pkg/kubelet/cm/devicemanager/manager_stub.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // ManagerStub provides a simple stub implementation for the Device Manager. @@ -44,7 +44,7 @@ func (h *ManagerStub) Stop() error { } // Allocate simply returns nil. 
-func (h *ManagerStub) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { +func (h *ManagerStub) Allocate(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { return nil } diff --git a/pkg/kubelet/cm/devicemanager/manager_test.go b/pkg/kubelet/cm/devicemanager/manager_test.go index 637779cfdbf..6cd969412cf 100644 --- a/pkg/kubelet/cm/devicemanager/manager_test.go +++ b/pkg/kubelet/cm/devicemanager/manager_test.go @@ -37,7 +37,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) const ( @@ -248,7 +248,7 @@ func setupDevicePlugin(t *testing.T, devs []*pluginapi.Device, pluginSocketName } func setupPluginWatcher(pluginSocketName string, m Manager) *pluginwatcher.Watcher { - w := pluginwatcher.NewWatcher(filepath.Dir(pluginSocketName)) + w := pluginwatcher.NewWatcher(filepath.Dir(pluginSocketName), "" /* deprecatedSockDir */) w.AddHandler(watcherapi.DevicePlugin, m.GetWatcherHandler()) w.Start() @@ -635,13 +635,13 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso return testManager, nil } -func getTestNodeInfo(allocatable v1.ResourceList) *schedulercache.NodeInfo { +func getTestNodeInfo(allocatable v1.ResourceList) *schedulernodeinfo.NodeInfo { cachedNode := &v1.Node{ Status: v1.NodeStatus{ Allocatable: allocatable, }, } - nodeInfo := &schedulercache.NodeInfo{} + nodeInfo := &schedulernodeinfo.NodeInfo{} nodeInfo.SetNode(cachedNode) return nodeInfo } @@ -875,7 +875,7 @@ func TestSanitizeNodeAllocatable(t *testing.T) { }, }, } - nodeInfo := &schedulercache.NodeInfo{} + nodeInfo := &schedulernodeinfo.NodeInfo{} nodeInfo.SetNode(cachedNode) testManager.sanitizeNodeAllocatable(nodeInfo) diff --git a/pkg/kubelet/cm/devicemanager/types.go 
b/pkg/kubelet/cm/devicemanager/types.go index 8396378b407..a420cf6541f 100644 --- a/pkg/kubelet/cm/devicemanager/types.go +++ b/pkg/kubelet/cm/devicemanager/types.go @@ -25,7 +25,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/lifecycle" watcher "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // Manager manages all the Device Plugins running on a node. @@ -41,7 +41,7 @@ type Manager interface { // variables, mount points and device files). The node object is provided // for the device manager to update the node capacity to reflect the // currently available devices. - Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error + Allocate(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error // Stop stops the manager. Stop() error diff --git a/pkg/kubelet/cm/helpers_linux_test.go b/pkg/kubelet/cm/helpers_linux_test.go index e82b800bdb1..51b2f6ce4c6 100644 --- a/pkg/kubelet/cm/helpers_linux_test.go +++ b/pkg/kubelet/cm/helpers_linux_test.go @@ -274,8 +274,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) { tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond) tunedQuota := int64(1 * time.Millisecond / time.Microsecond) - utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUCFSQuotaPeriod, true) - defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUCFSQuotaPeriod, false) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUCFSQuotaPeriod, true)() minShares := uint64(MinShares) burstableShares := MilliCPUToShares(100) diff --git a/pkg/kubelet/config/BUILD b/pkg/kubelet/config/BUILD index 8b408d66a2e..5e89141c02a 100644 --- a/pkg/kubelet/config/BUILD +++ 
b/pkg/kubelet/config/BUILD @@ -48,7 +48,7 @@ go_library( ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/golang.org/x/exp/inotify:go_default_library", + "//vendor/github.com/sigma/go-inotify:go_default_library", ], "//conditions:default": [], }), diff --git a/pkg/kubelet/config/file.go b/pkg/kubelet/config/file.go index 5eee61d3cf4..a08a987795e 100644 --- a/pkg/kubelet/config/file.go +++ b/pkg/kubelet/config/file.go @@ -61,7 +61,7 @@ type sourceFile struct { } func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) { - // "golang.org/x/exp/inotify" requires a path without trailing "/" + // "github.com/sigma/go-inotify" requires a path without trailing "/" path = strings.TrimRight(path, string(os.PathSeparator)) config := newSourceFile(path, nodeName, period, updates) diff --git a/pkg/kubelet/config/file_linux.go b/pkg/kubelet/config/file_linux.go index 98803ec4492..85ed2456900 100644 --- a/pkg/kubelet/config/file_linux.go +++ b/pkg/kubelet/config/file_linux.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "golang.org/x/exp/inotify" + "github.com/sigma/go-inotify" "k8s.io/klog" "k8s.io/api/core/v1" diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go index 2d9fcd33fd9..aacbf9e36b5 100644 --- a/pkg/kubelet/container/runtime.go +++ b/pkg/kubelet/container/runtime.go @@ -289,7 +289,7 @@ type PodStatus struct { ID types.UID // Name of the pod. Name string - // Namspace of the pod. + // Namespace of the pod. Namespace string // IP of the pod. 
IP string diff --git a/pkg/kubelet/eviction/BUILD b/pkg/kubelet/eviction/BUILD index 65968751a8d..cf6b99ae6cc 100644 --- a/pkg/kubelet/eviction/BUILD +++ b/pkg/kubelet/eviction/BUILD @@ -11,7 +11,6 @@ go_test( srcs = [ "eviction_manager_test.go", "helpers_test.go", - "main_test.go", "memory_threshold_notifier_test.go", "mock_threshold_notifier_test.go", ], diff --git a/pkg/kubelet/eviction/eviction_manager_test.go b/pkg/kubelet/eviction/eviction_manager_test.go index 67d161aef6c..433c0669f52 100644 --- a/pkg/kubelet/eviction/eviction_manager_test.go +++ b/pkg/kubelet/eviction/eviction_manager_test.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/clock" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/client-go/tools/record" kubeapi "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" @@ -181,7 +182,7 @@ type podToMake struct { // TestMemoryPressure func TestMemoryPressure(t *testing.T) { - utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true}) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() podMaker := makePodWithMemoryStats summaryStatsMaker := makeMemoryStats podsToMake := []podToMake{ @@ -399,10 +400,9 @@ func parseQuantity(value string) resource.Quantity { } func TestDiskPressureNodeFs(t *testing.T) { - utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{ - string(features.LocalStorageCapacityIsolation): true, - string(features.PodPriority): true, - }) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() + podMaker := makePodWithDiskStats summaryStatsMaker := makeDiskStats podsToMake := []podToMake{ @@ -600,7 +600,7 @@ func 
TestDiskPressureNodeFs(t *testing.T) { // TestMinReclaim verifies that min-reclaim works as desired. func TestMinReclaim(t *testing.T) { - utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true}) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() podMaker := makePodWithMemoryStats summaryStatsMaker := makeMemoryStats podsToMake := []podToMake{ @@ -739,10 +739,9 @@ func TestMinReclaim(t *testing.T) { } func TestNodeReclaimFuncs(t *testing.T) { - utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{ - string(features.PodPriority): true, - string(features.LocalStorageCapacityIsolation): true, - }) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() + podMaker := makePodWithDiskStats summaryStatsMaker := makeDiskStats podsToMake := []podToMake{ @@ -918,7 +917,7 @@ func TestNodeReclaimFuncs(t *testing.T) { } func TestInodePressureNodeFsInodes(t *testing.T) { - utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true}) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() podMaker := func(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, rootInodes, logInodes, volumeInodes string) (*v1.Pod, statsapi.PodStats) { pod := newPod(name, priority, []v1.Container{ newContainer(name, requests, limits), @@ -1140,7 +1139,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) { // TestCriticalPodsAreNotEvicted func TestCriticalPodsAreNotEvicted(t *testing.T) { - utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true}) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, 
features.PodPriority, true)() podMaker := makePodWithMemoryStats summaryStatsMaker := makeMemoryStats podsToMake := []podToMake{ @@ -1210,7 +1209,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) { } // Enable critical pod annotation feature gate - utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.ExperimentalCriticalPodAnnotation): true}) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)() // induce soft threshold fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("1500Mi", podStats) @@ -1255,7 +1254,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) { } // Disable critical pod annotation feature gate - utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.ExperimentalCriticalPodAnnotation): false}) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, false)() // induce memory pressure! fakeClock.Step(1 * time.Minute) @@ -1275,7 +1274,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) { // TestAllocatableMemoryPressure func TestAllocatableMemoryPressure(t *testing.T) { - utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true}) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() podMaker := makePodWithMemoryStats summaryStatsMaker := makeMemoryStats podsToMake := []podToMake{ diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index ef34b97a0ba..6b5d7eb7db3 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -390,16 +390,6 @@ func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsSt }, nil } -// podMemoryUsage aggregates pod memory usage. 
-func podMemoryUsage(podStats statsapi.PodStats) (v1.ResourceList, error) { - memory := resource.Quantity{Format: resource.BinarySI} - for _, container := range podStats.Containers { - // memory usage (if known) - memory.Add(*memoryUsage(container.Memory)) - } - return v1.ResourceList{v1.ResourceMemory: memory}, nil -} - // localEphemeralVolumeNames returns the set of ephemeral volumes for the pod that are local func localEphemeralVolumeNames(pod *v1.Pod) []string { result := []string{} @@ -544,15 +534,8 @@ func exceedMemoryRequests(stats statsFunc) cmpFunc { return cmpBool(!p1Found, !p2Found) } - p1Usage, p1Err := podMemoryUsage(p1Stats) - p2Usage, p2Err := podMemoryUsage(p2Stats) - if p1Err != nil || p2Err != nil { - // prioritize evicting the pod which had an error getting stats - return cmpBool(p1Err != nil, p2Err != nil) - } - - p1Memory := p1Usage[v1.ResourceMemory] - p2Memory := p2Usage[v1.ResourceMemory] + p1Memory := memoryUsage(p1Stats.Memory) + p2Memory := memoryUsage(p2Stats.Memory) p1ExceedsRequests := p1Memory.Cmp(podRequest(p1, v1.ResourceMemory)) == 1 p2ExceedsRequests := p2Memory.Cmp(podRequest(p2, v1.ResourceMemory)) == 1 // prioritize evicting the pod which exceeds its requests @@ -570,24 +553,17 @@ func memory(stats statsFunc) cmpFunc { return cmpBool(!p1Found, !p2Found) } - p1Usage, p1Err := podMemoryUsage(p1Stats) - p2Usage, p2Err := podMemoryUsage(p2Stats) - if p1Err != nil || p2Err != nil { - // prioritize evicting the pod which had an error getting stats - return cmpBool(p1Err != nil, p2Err != nil) - } - // adjust p1, p2 usage relative to the request (if any) - p1Memory := p1Usage[v1.ResourceMemory] + p1Memory := memoryUsage(p1Stats.Memory) p1Request := podRequest(p1, v1.ResourceMemory) p1Memory.Sub(p1Request) - p2Memory := p2Usage[v1.ResourceMemory] + p2Memory := memoryUsage(p2Stats.Memory) p2Request := podRequest(p2, v1.ResourceMemory) p2Memory.Sub(p2Request) // prioritize evicting the pod which has the larger consumption of memory - 
return p2Memory.Cmp(p1Memory) + return p2Memory.Cmp(*p1Memory) } } diff --git a/pkg/kubelet/eviction/helpers_test.go b/pkg/kubelet/eviction/helpers_test.go index ab960a4a546..b627f7f40de 100644 --- a/pkg/kubelet/eviction/helpers_test.go +++ b/pkg/kubelet/eviction/helpers_test.go @@ -28,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/kubernetes/pkg/features" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" @@ -423,7 +424,8 @@ func thresholdEqual(a evictionapi.Threshold, b evictionapi.Threshold) bool { } func TestOrderedByExceedsRequestMemory(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() + below := newPod("below-requests", -1, []v1.Container{ newContainer("below-requests", newResourceList("", "200Mi", ""), newResourceList("", "", "")), }, nil) @@ -450,8 +452,8 @@ func TestOrderedByExceedsRequestMemory(t *testing.T) { } func TestOrderedByExceedsRequestDisk(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)() below := newPod("below-requests", -1, []v1.Container{ newContainer("below-requests", v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("200Mi")}, newResourceList("", "", "")), }, nil) @@ -478,7 +480,7 @@ func TestOrderedByExceedsRequestDisk(t *testing.T) { 
} func TestOrderedByPriority(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() low := newPod("low-priority", -134, []v1.Container{ newContainer("low-priority", newResourceList("", "", ""), newResourceList("", "", "")), }, nil) @@ -501,7 +503,7 @@ func TestOrderedByPriority(t *testing.T) { } func TestOrderedByPriorityDisabled(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=false", features.PodPriority)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, false)() low := newPod("low-priority", lowPriority, []v1.Container{ newContainer("low-priority", newResourceList("", "", ""), newResourceList("", "", "")), }, nil) @@ -525,7 +527,7 @@ func TestOrderedByPriorityDisabled(t *testing.T) { } func TestOrderedbyDisk(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)() pod1 := newPod("best-effort-high", defaultPriority, []v1.Container{ newContainer("best-effort-high", newResourceList("", "", ""), newResourceList("", "", "")), }, []v1.Volume{ @@ -592,7 +594,7 @@ func TestOrderedbyDisk(t *testing.T) { // Tests that we correctly ignore disk requests when the local storage feature gate is disabled. 
func TestOrderedbyDiskDisableLocalStorage(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=false", features.LocalStorageCapacityIsolation)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, false)() pod1 := newPod("best-effort-high", defaultPriority, []v1.Container{ newContainer("best-effort-high", newResourceList("", "", ""), newResourceList("", "", "")), }, []v1.Volume{ @@ -658,8 +660,8 @@ func TestOrderedbyDiskDisableLocalStorage(t *testing.T) { } func TestOrderedbyInodes(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)() low := newPod("low", defaultPriority, []v1.Container{ newContainer("low", newResourceList("", "", ""), newResourceList("", "", "")), }, []v1.Volume{ @@ -702,8 +704,8 @@ func TestOrderedbyInodes(t *testing.T) { // TestOrderedByPriorityDisk ensures we order pods by priority and then greediest resource consumer func TestOrderedByPriorityDisk(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)() pod1 := newPod("above-requests-low-priority-high-usage", lowPriority, []v1.Container{ newContainer("above-requests-low-priority-high-usage", newResourceList("", "", ""), 
newResourceList("", "", "")), }, []v1.Volume{ @@ -787,7 +789,7 @@ func TestOrderedByPriorityDisk(t *testing.T) { // TestOrderedByPriorityInodes ensures we order pods by priority and then greediest resource consumer func TestOrderedByPriorityInodes(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() pod1 := newPod("low-priority-high-usage", lowPriority, []v1.Container{ newContainer("low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")), }, []v1.Volume{ @@ -880,7 +882,7 @@ func TestOrderedByMemory(t *testing.T) { // TestOrderedByPriorityMemory ensures we order by priority and then memory consumption relative to request. func TestOrderedByPriorityMemory(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() pod1 := newPod("above-requests-low-priority-high-usage", lowPriority, []v1.Container{ newContainer("above-requests-low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")), }, nil) @@ -1017,23 +1019,17 @@ func (f *fakeSummaryProvider) GetCPUAndMemoryStats() (*statsapi.Summary, error) // newPodStats returns a pod stat where each container is using the specified working set // each pod must have a Name, UID, Namespace -func newPodStats(pod *v1.Pod, containerWorkingSetBytes int64) statsapi.PodStats { - result := statsapi.PodStats{ +func newPodStats(pod *v1.Pod, podWorkingSetBytes uint64) statsapi.PodStats { + return statsapi.PodStats{ PodRef: statsapi.PodReference{ Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID), }, + Memory: &statsapi.MemoryStats{ + WorkingSetBytes: &podWorkingSetBytes, + }, } - val := uint64(containerWorkingSetBytes) - for range pod.Spec.Containers { - 
result.Containers = append(result.Containers, statsapi.ContainerStats{ - Memory: &statsapi.MemoryStats{ - WorkingSetBytes: &val, - }, - }) - } - return result } func TestMakeSignalObservations(t *testing.T) { @@ -1098,9 +1094,9 @@ func TestMakeSignalObservations(t *testing.T) { podMaker("pod1", "ns2", "uuid2", 1), podMaker("pod3", "ns3", "uuid3", 1), } - containerWorkingSetBytes := int64(1024 * 1024 * 1024) + podWorkingSetBytes := uint64(1024 * 1024 * 1024) for _, pod := range pods { - fakeStats.Pods = append(fakeStats.Pods, newPodStats(pod, containerWorkingSetBytes)) + fakeStats.Pods = append(fakeStats.Pods, newPodStats(pod, podWorkingSetBytes)) } res := quantityMustParse("5Gi") // Allocatable thresholds are always 100%. Verify that Threshold == Capacity. @@ -1173,11 +1169,8 @@ func TestMakeSignalObservations(t *testing.T) { if !found { t.Errorf("Pod stats were not found for pod %v", pod.UID) } - for _, container := range podStats.Containers { - actual := int64(*container.Memory.WorkingSetBytes) - if containerWorkingSetBytes != actual { - t.Errorf("Container working set expected %v, actual: %v", containerWorkingSetBytes, actual) - } + if *podStats.Memory.WorkingSetBytes != podWorkingSetBytes { + t.Errorf("Pod working set expected %v, actual: %v", podWorkingSetBytes, *podStats.Memory.WorkingSetBytes) } } } @@ -1853,20 +1846,15 @@ func newPodDiskStats(pod *v1.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resou } func newPodMemoryStats(pod *v1.Pod, workingSet resource.Quantity) statsapi.PodStats { - result := statsapi.PodStats{ + workingSetBytes := uint64(workingSet.Value()) + return statsapi.PodStats{ PodRef: statsapi.PodReference{ Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID), }, + Memory: &statsapi.MemoryStats{ + WorkingSetBytes: &workingSetBytes, + }, } - for range pod.Spec.Containers { - workingSetBytes := uint64(workingSet.Value()) - result.Containers = append(result.Containers, statsapi.ContainerStats{ - Memory: &statsapi.MemoryStats{ - 
WorkingSetBytes: &workingSetBytes, - }, - }) - } - return result } func newResourceList(cpu, memory, disk string) v1.ResourceList { diff --git a/pkg/kubelet/eviction/main_test.go b/pkg/kubelet/eviction/main_test.go deleted file mode 100644 index a5c470b3ed5..00000000000 --- a/pkg/kubelet/eviction/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package eviction - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index c44bbb0edea..27490aa2cfb 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -590,6 +590,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, // podManager is also responsible for keeping secretManager and configMapManager contents up-to-date. 
klet.podManager = kubepod.NewBasicPodManager(kubepod.NewBasicMirrorClient(klet.kubeClient), secretManager, configMapManager, checkpointManager) + klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet) + if remoteRuntimeEndpoint != "" { // remoteImageEndpoint is same as remoteRuntimeEndpoint if not explicitly specified if remoteImageEndpoint == "" { @@ -705,7 +707,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.resourceAnalyzer, klet.podManager, klet.runtimeCache, - klet.containerRuntime) + klet.containerRuntime, + klet.statusManager) } else { klet.StatsProvider = stats.NewCRIStatsProvider( klet.cadvisor, @@ -754,8 +757,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.containerLogManager = logs.NewStubContainerLogManager() } - klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet) - if kubeCfg.ServerTLSBootstrap && kubeDeps.TLSOptions != nil && utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) { klet.serverCertificateManager, err = kubeletcertificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, klet.getLastObservedNodeAddresses, certDirectory) if err != nil { @@ -789,7 +790,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, return nil, err } if klet.enablePluginsWatcher { - klet.pluginWatcher = pluginwatcher.NewWatcher(klet.getPluginsRegistrationDir()) + klet.pluginWatcher = pluginwatcher.NewWatcher( + klet.getPluginsRegistrationDir(), /* sockDir */ + klet.getPluginsDir(), /* deprecatedSockDir */ + ) } // If the experimentalMounterPathFlag is set, we do not want to @@ -1061,15 +1065,15 @@ type Kubelet struct { lastStatusReportTime time.Time // syncNodeStatusMux is a lock on updating the node status, because this path is not thread-safe. - // This lock is used by Kublet.syncNodeStatus function and shouldn't be used anywhere else. 
+ // This lock is used by Kubelet.syncNodeStatus function and shouldn't be used anywhere else. syncNodeStatusMux sync.Mutex // updatePodCIDRMux is a lock on updating pod CIDR, because this path is not thread-safe. - // This lock is used by Kublet.syncNodeStatus function and shouldn't be used anywhere else. + // This lock is used by Kubelet.syncNodeStatus function and shouldn't be used anywhere else. updatePodCIDRMux sync.Mutex // updateRuntimeMux is a lock on updating runtime, because this path is not thread-safe. - // This lock is used by Kublet.updateRuntimeUp function and shouldn't be used anywhere else. + // This lock is used by Kubelet.updateRuntimeUp function and shouldn't be used anywhere else. updateRuntimeMux sync.Mutex // nodeLeaseController claims and renews the node lease for this Kubelet diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 59a2fd60bb4..ca9a900f54d 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -852,7 +852,7 @@ func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool { return status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(status.ContainerStatuses)) } -// IsPodTerminated returns trus if the pod with the provided UID is in a terminated state ("Failed" or "Succeeded") +// IsPodTerminated returns true if the pod with the provided UID is in a terminated state ("Failed" or "Succeeded") // or if the pod has been deleted or removed func (kl *Kubelet) IsPodTerminated(uid types.UID) bool { pod, podFound := kl.podManager.GetPodByUID(uid) diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 89a7da95969..192060bda84 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -36,6 +36,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" 
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" @@ -65,7 +66,7 @@ import ( kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/queue" kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/awsebs" @@ -263,7 +264,8 @@ func newTestKubeletWithImageList( kubelet.resourceAnalyzer, kubelet.podManager, kubelet.runtimeCache, - fakeRuntime) + fakeRuntime, + kubelet.statusManager) fakeImageGCPolicy := images.ImageGCPolicy{ HighThresholdPercent: 90, LowThresholdPercent: 80, @@ -658,7 +660,7 @@ func TestHandlePluginResources(t *testing.T) { } kl.nodeInfo = testNodeInfo{nodes: nodes} - updatePluginResourcesFunc := func(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { + updatePluginResourcesFunc := func(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { // Maps from resourceName to the value we use to set node.allocatableResource[resourceName]. // A resource with invalid value (< 0) causes the function to return an error // to emulate resource Allocation failure. 
@@ -2254,17 +2256,12 @@ func runVolumeManager(kubelet *Kubelet) chan struct{} { func forEachFeatureGate(t *testing.T, fs []utilfeature.Feature, tf func(t *testing.T)) { for _, fg := range fs { - func() { - enabled := utilfeature.DefaultFeatureGate.Enabled(fg) - defer func() { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)) - }() - - for _, f := range []bool{true, false} { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)) + for _, f := range []bool{true, false} { + func() { + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)() t.Run(fmt.Sprintf("%v(%t)", fg, f), tf) - } - }() + }() + } } } diff --git a/pkg/kubelet/kuberuntime/BUILD b/pkg/kubelet/kuberuntime/BUILD index 361c3487115..12c16e7e548 100644 --- a/pkg/kubelet/kuberuntime/BUILD +++ b/pkg/kubelet/kuberuntime/BUILD @@ -95,7 +95,6 @@ go_test( "kuberuntime_sandbox_test.go", "labels_test.go", "legacy_test.go", - "main_test.go", "security_context_test.go", ], embed = [":go_default_library"], diff --git a/pkg/kubelet/kuberuntime/helpers_linux_test.go b/pkg/kubelet/kuberuntime/helpers_linux_test.go index 73615c61cc2..e394d727317 100644 --- a/pkg/kubelet/kuberuntime/helpers_linux_test.go +++ b/pkg/kubelet/kuberuntime/helpers_linux_test.go @@ -89,8 +89,7 @@ func TestMilliCPUToQuota(t *testing.T) { } func TestMilliCPUToQuotaWithCustomCPUCFSQuotaPeriod(t *testing.T) { - utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CPUCFSQuotaPeriod, true) - defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CPUCFSQuotaPeriod, false) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CPUCFSQuotaPeriod, true)() for _, testCase := range []struct { msg string diff --git a/pkg/kubelet/kuberuntime/helpers_test.go b/pkg/kubelet/kuberuntime/helpers_test.go index 0db2ad4d7ed..53a927ac05b 100644 --- 
a/pkg/kubelet/kuberuntime/helpers_test.go +++ b/pkg/kubelet/kuberuntime/helpers_test.go @@ -351,8 +351,7 @@ func TestNamespacesForPod(t *testing.T) { assert.Equal(t, test.expected, actual) } - // Test ShareProcessNamespace feature disabled, feature gate restored by previous defer - utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodShareProcessNamespace, false) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodShareProcessNamespace, false)() for desc, test := range map[string]struct { input *v1.Pod diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 9407a03c63a..d0624ee0483 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -394,7 +394,8 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n for i, c := range containers { status, err := m.runtimeService.ContainerStatus(c.Id) if err != nil { - klog.Errorf("ContainerStatus for %s error: %v", c.Id, err) + // Merely log this here; GetPodStatus will actually report the error out. 
+ klog.V(4).Infof("ContainerStatus for %s error: %v", c.Id, err) return nil, err } cStatus := toKubeContainerStatus(status, m.runtimeName) @@ -583,7 +584,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec klog.V(3).Infof("Container %q exited normally", containerID.String()) } - message := fmt.Sprintf("Killing container with id %s", containerID.String()) + message := fmt.Sprintf("Killing container %s", containerID.String()) if reason != "" { message = fmt.Sprint(message, ":", reason) } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 90039beda09..3e6313bf46b 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "os" + "sync" "time" cadvisorapi "github.com/google/cadvisor/info/v1" @@ -59,6 +60,8 @@ const ( // The expiration time of version cache. versionCacheTTL = 60 * time.Second + // How frequently to report identical errors + identicalErrorDelay = 1 * time.Minute ) var ( @@ -123,6 +126,13 @@ type kubeGenericRuntimeManager struct { // Manage RuntimeClass resources. runtimeClassManager *runtimeclass.Manager + + // Cache last per-pod error message to reduce log spam; keyed by pod full name + lastError map[string]string + + // Time last per-pod error message was printed; keyed by pod full name + errorPrinted map[string]time.Time + errorMapLock sync.Mutex } // KubeGenericRuntime is a interface contains interfaces for container runtime and command. 
@@ -177,6 +187,8 @@ func NewKubeGenericRuntimeManager( internalLifecycle: internalLifecycle, legacyLogProvider: legacyLogProvider, runtimeClassManager: runtimeClassManager, + lastError: make(map[string]string), + errorPrinted: make(map[string]time.Time), } typedVersion, err := kubeRuntimeManager.runtimeService.Version(kubeRuntimeAPIVersion) @@ -819,6 +831,17 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPo return } +func (m *kubeGenericRuntimeManager) cleanupErrorTimeouts() { + m.errorMapLock.Lock() + defer m.errorMapLock.Unlock() + for name, timeout := range m.errorPrinted { + if time.Now().Sub(timeout) >= identicalErrorDelay { + delete(m.errorPrinted, name) + delete(m.lastError, name) + } + } +} + // GetPodStatus retrieves the status of the pod, including the // information of all containers in the pod that are visible in Runtime. func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) { @@ -867,10 +890,19 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp // Get statuses of all containers visible in the pod. 
containerStatuses, err := m.getPodContainerStatuses(uid, name, namespace) + m.errorMapLock.Lock() + defer m.errorMapLock.Unlock() if err != nil { - klog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err) + lastMsg, ok := m.lastError[podFullName] + if !ok || err.Error() != lastMsg || time.Now().Sub(m.errorPrinted[podFullName]) >= identicalErrorDelay { + klog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err) + m.errorPrinted[podFullName] = time.Now() + m.lastError[podFullName] = err.Error() + } return nil, err } + delete(m.errorPrinted, podFullName) + delete(m.lastError, podFullName) return &kubecontainer.PodStatus{ ID: uid, diff --git a/pkg/kubelet/kuberuntime/labels_test.go b/pkg/kubelet/kuberuntime/labels_test.go index 7f0652cbd41..ea7f5869033 100644 --- a/pkg/kubelet/kuberuntime/labels_test.go +++ b/pkg/kubelet/kuberuntime/labels_test.go @@ -24,6 +24,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" + "k8s.io/kubernetes/pkg/features" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -67,15 +69,15 @@ func TestContainerLabels(t *testing.T) { var tests = []struct { description string - featuresCreated string // Features enabled when container is created - featuresStatus string // Features enabled when container status is read + featuresCreated bool // Features enabled when container is created + featuresStatus bool // Features enabled when container status is read typeLabel kubecontainer.ContainerType expected *labeledContainerInfo }{ { "Debug containers disabled", - "DebugContainers=False", - "DebugContainers=False", + false, + false, "ignored", &labeledContainerInfo{ PodName: pod.Name, @@ -87,8 +89,8 @@ func TestContainerLabels(t *testing.T) { }, { "Regular containers", - "DebugContainers=True", - "DebugContainers=True", + true, + true, 
kubecontainer.ContainerTypeRegular, &labeledContainerInfo{ PodName: pod.Name, @@ -100,8 +102,8 @@ func TestContainerLabels(t *testing.T) { }, { "Init containers", - "DebugContainers=True", - "DebugContainers=True", + true, + true, kubecontainer.ContainerTypeInit, &labeledContainerInfo{ PodName: pod.Name, @@ -113,8 +115,8 @@ func TestContainerLabels(t *testing.T) { }, { "Created without type label", - "DebugContainers=False", - "DebugContainers=True", + false, + true, "ignored", &labeledContainerInfo{ PodName: pod.Name, @@ -126,8 +128,8 @@ func TestContainerLabels(t *testing.T) { }, { "Created with type label, subsequently disabled", - "DebugContainers=True", - "DebugContainers=False", + true, + false, kubecontainer.ContainerTypeRegular, &labeledContainerInfo{ PodName: pod.Name, @@ -141,15 +143,16 @@ func TestContainerLabels(t *testing.T) { // Test whether we can get right information from label for _, test := range tests { - utilfeature.DefaultFeatureGate.Set(test.featuresCreated) - labels := newContainerLabels(container, pod, test.typeLabel) - utilfeature.DefaultFeatureGate.Set(test.featuresStatus) - containerInfo := getContainerInfoFromLabels(labels) - if !reflect.DeepEqual(containerInfo, test.expected) { - t.Errorf("%v: expected %v, got %v", test.description, test.expected, containerInfo) - } + func() { + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DebugContainers, test.featuresCreated)() + labels := newContainerLabels(container, pod, test.typeLabel) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DebugContainers, test.featuresStatus)() + containerInfo := getContainerInfoFromLabels(labels) + if !reflect.DeepEqual(containerInfo, test.expected) { + t.Errorf("%v: expected %v, got %v", test.description, test.expected, containerInfo) + } + }() } - utilfeature.DefaultFeatureGate.Set("DebugContainers=False") } func TestContainerAnnotations(t *testing.T) { diff --git 
a/pkg/kubelet/kuberuntime/main_test.go b/pkg/kubelet/kuberuntime/main_test.go deleted file mode 100644 index 8507d04f373..00000000000 --- a/pkg/kubelet/kuberuntime/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kuberuntime - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/kubelet/lifecycle/BUILD b/pkg/kubelet/lifecycle/BUILD index ff518ddfdf1..1a1964b5b48 100644 --- a/pkg/kubelet/lifecycle/BUILD +++ b/pkg/kubelet/lifecycle/BUILD @@ -23,7 +23,7 @@ go_library( "//pkg/kubelet/util/format:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/security/apparmor:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", @@ -42,7 +42,7 @@ go_test( deps = [ "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/util/format:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", 
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/pkg/kubelet/lifecycle/predicate.go b/pkg/kubelet/lifecycle/predicate.go index df4a32d1add..30d36638409 100644 --- a/pkg/kubelet/lifecycle/predicate.go +++ b/pkg/kubelet/lifecycle/predicate.go @@ -26,12 +26,12 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) type getNodeAnyWayFuncType func() (*v1.Node, error) -type pluginResourceUpdateFuncType func(*schedulercache.NodeInfo, *PodAdmitAttributes) error +type pluginResourceUpdateFuncType func(*schedulernodeinfo.NodeInfo, *PodAdmitAttributes) error // AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod. // This allows for the graceful handling of pod admission failure. @@ -67,7 +67,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult } admitPod := attrs.Pod pods := attrs.OtherPods - nodeInfo := schedulercache.NewNodeInfo(pods...) + nodeInfo := schedulernodeinfo.NewNodeInfo(pods...) 
nodeInfo.SetNode(node) // ensure the node has enough plugin resources for that required in pods if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil { @@ -155,7 +155,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult } } -func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) *v1.Pod { +func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *v1.Pod { podCopy := pod.DeepCopy() for i, c := range pod.Spec.Containers { // We only handle requests in Requests but not Limits because the diff --git a/pkg/kubelet/lifecycle/predicate_test.go b/pkg/kubelet/lifecycle/predicate_test.go index 47b792fbbde..af4cc617902 100644 --- a/pkg/kubelet/lifecycle/predicate_test.go +++ b/pkg/kubelet/lifecycle/predicate_test.go @@ -22,7 +22,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) var ( @@ -81,7 +81,7 @@ func TestRemoveMissingExtendedResources(t *testing.T) { ), }, } { - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(test.node) pod := removeMissingExtendedResources(test.pod, nodeInfo) if !reflect.DeepEqual(pod, test.expectedPod) { diff --git a/pkg/kubelet/main_test.go b/pkg/kubelet/main_test.go deleted file mode 100644 index f7ab8e1ca1f..00000000000 --- a/pkg/kubelet/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubelet - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/kubelet/network/dns/BUILD b/pkg/kubelet/network/dns/BUILD index f7d550391f3..00a63fef44f 100644 --- a/pkg/kubelet/network/dns/BUILD +++ b/pkg/kubelet/network/dns/BUILD @@ -21,10 +21,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "dns_test.go", - "main_test.go", - ], + srcs = ["dns_test.go"], embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", diff --git a/pkg/kubelet/network/dns/dns_test.go b/pkg/kubelet/network/dns/dns_test.go index 1c47aaa1982..052998f90d2 100644 --- a/pkg/kubelet/network/dns/dns_test.go +++ b/pkg/kubelet/network/dns/dns_test.go @@ -29,7 +29,9 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/client-go/tools/record" + "k8s.io/kubernetes/pkg/features" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "github.com/stretchr/testify/assert" @@ -266,14 +268,6 @@ func TestMergeDNSOptions(t *testing.T) { } func TestGetPodDNSType(t *testing.T) { - customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS") - defer func() { - // Restoring the old value. 
- if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil { - t.Errorf("Failed to set CustomPodDNS feature gate: %v", err) - } - }() - recorder := record.NewFakeRecorder(20) nodeRef := &v1.ObjectReference{ Kind: "Node", @@ -361,28 +355,28 @@ func TestGetPodDNSType(t *testing.T) { } for _, tc := range testCases { - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", tc.customPodDNSFeatureGate)); err != nil { - t.Errorf("Failed to set CustomPodDNS feature gate: %v", err) - } + t.Run(tc.desc, func(t *testing.T) { + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, tc.customPodDNSFeatureGate)() - if tc.hasClusterDNS { - configurer.clusterDNS = testClusterDNS - } else { - configurer.clusterDNS = nil - } - pod.Spec.DNSPolicy = tc.dnsPolicy - pod.Spec.HostNetwork = tc.hostNetwork - - resType, err := getPodDNSType(pod) - if tc.expectedError { - if err == nil { - t.Errorf("%s: GetPodDNSType(%v) got no error, want error", tc.desc, pod) + if tc.hasClusterDNS { + configurer.clusterDNS = testClusterDNS + } else { + configurer.clusterDNS = nil } - continue - } - if resType != tc.expectedDNSType { - t.Errorf("%s: GetPodDNSType(%v)=%v, want %v", tc.desc, pod, resType, tc.expectedDNSType) - } + pod.Spec.DNSPolicy = tc.dnsPolicy + pod.Spec.HostNetwork = tc.hostNetwork + + resType, err := getPodDNSType(pod) + if tc.expectedError { + if err == nil { + t.Errorf("%s: GetPodDNSType(%v) got no error, want error", tc.desc, pod) + } + return + } + if resType != tc.expectedDNSType { + t.Errorf("%s: GetPodDNSType(%v)=%v, want %v", tc.desc, pod, resType, tc.expectedDNSType) + } + }) } } @@ -482,14 +476,6 @@ func TestGetPodDNS(t *testing.T) { } func TestGetPodDNSCustom(t *testing.T) { - customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS") - defer func() { - // Restoring the old value. 
- if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil { - t.Errorf("Failed to set CustomPodDNS feature gate: %v", err) - } - }() - recorder := record.NewFakeRecorder(20) nodeRef := &v1.ObjectReference{ Kind: "Node", @@ -628,21 +614,21 @@ func TestGetPodDNSCustom(t *testing.T) { } for _, tc := range testCases { - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", tc.customPodDNSFeatureGate)); err != nil { - t.Errorf("Failed to set CustomPodDNS feature gate: %v", err) - } + t.Run(tc.desc, func(t *testing.T) { + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, tc.customPodDNSFeatureGate)() - testPod.Spec.HostNetwork = tc.hostnetwork - testPod.Spec.DNSConfig = tc.dnsConfig - testPod.Spec.DNSPolicy = tc.dnsPolicy + testPod.Spec.HostNetwork = tc.hostnetwork + testPod.Spec.DNSConfig = tc.dnsConfig + testPod.Spec.DNSPolicy = tc.dnsPolicy - resDNSConfig, err := configurer.GetPodDNS(testPod) - if err != nil { - t.Errorf("%s: GetPodDNS(%v), unexpected error: %v", tc.desc, testPod, err) - } - if !dnsConfigsAreEqual(resDNSConfig, tc.expectedDNSConfig) { - t.Errorf("%s: GetPodDNS(%v)=%v, want %v", tc.desc, testPod, resDNSConfig, tc.expectedDNSConfig) - } + resDNSConfig, err := configurer.GetPodDNS(testPod) + if err != nil { + t.Errorf("%s: GetPodDNS(%v), unexpected error: %v", tc.desc, testPod, err) + } + if !dnsConfigsAreEqual(resDNSConfig, tc.expectedDNSConfig) { + t.Errorf("%s: GetPodDNS(%v)=%v, want %v", tc.desc, testPod, resDNSConfig, tc.expectedDNSConfig) + } + }) } } diff --git a/pkg/kubelet/network/dns/main_test.go b/pkg/kubelet/network/dns/main_test.go deleted file mode 100644 index 1ed5fa15436..00000000000 --- a/pkg/kubelet/network/dns/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dns - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/kubelet/nodestatus/setters.go b/pkg/kubelet/nodestatus/setters.go index d16bcec2949..199a587e9bb 100644 --- a/pkg/kubelet/nodestatus/setters.go +++ b/pkg/kubelet/nodestatus/setters.go @@ -270,7 +270,9 @@ func MachineInfo(nodeName string, // capacity for every node status request initialCapacity := capacityFunc() if initialCapacity != nil { - node.Status.Capacity[v1.ResourceEphemeralStorage] = initialCapacity[v1.ResourceEphemeralStorage] + if v, exists := initialCapacity[v1.ResourceEphemeralStorage]; exists { + node.Status.Capacity[v1.ResourceEphemeralStorage] = v + } } } diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go index 4de9c721130..f69e0d49552 100644 --- a/pkg/kubelet/pleg/generic.go +++ b/pkg/kubelet/pleg/generic.go @@ -244,7 +244,8 @@ func (g *GenericPLEG) relist() { // serially may take a while. We should be aware of this and // parallelize if needed. if err := g.updateCache(pod, pid); err != nil { - klog.Errorf("PLEG: Ignoring events for pod %s/%s: %v", pod.Name, pod.Namespace, err) + // Rely on updateCache calling GetPodStatus to log the actual error. 
+ klog.V(4).Infof("PLEG: Ignoring events for pod %s/%s: %v", pod.Name, pod.Namespace, err) // make sure we try to reinspect the pod during the next relisting needsReinspection[pid] = pod @@ -274,7 +275,8 @@ func (g *GenericPLEG) relist() { klog.V(5).Infof("GenericPLEG: Reinspecting pods that previously failed inspection") for pid, pod := range g.podsToReinspect { if err := g.updateCache(pod, pid); err != nil { - klog.Errorf("PLEG: pod %s/%s failed reinspection: %v", pod.Name, pod.Namespace, err) + // Rely on updateCache calling GetPodStatus to log the actual error. + klog.V(5).Infof("PLEG: pod %s/%s failed reinspection: %v", pod.Name, pod.Namespace, err) needsReinspection[pid] = pod } } diff --git a/pkg/kubelet/preemption/BUILD b/pkg/kubelet/preemption/BUILD index d1a06818df3..8fab53ffa81 100644 --- a/pkg/kubelet/preemption/BUILD +++ b/pkg/kubelet/preemption/BUILD @@ -41,10 +41,7 @@ filegroup( go_test( name = "go_default_test", - srcs = [ - "main_test.go", - "preemption_test.go", - ], + srcs = ["preemption_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/pkg/kubelet/preemption/main_test.go b/pkg/kubelet/preemption/main_test.go deleted file mode 100644 index 2878db15c9b..00000000000 --- a/pkg/kubelet/preemption/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package preemption - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go index efec60c98d2..cc69ec02f7c 100644 --- a/pkg/kubelet/prober/prober.go +++ b/pkg/kubelet/prober/prober.go @@ -274,6 +274,26 @@ func (eic execInContainer) SetStderr(out io.Writer) { //unimplemented } +func (eic execInContainer) SetEnv(env []string) { + //unimplemented +} + func (eic execInContainer) Stop() { //unimplemented } + +func (eic execInContainer) Start() error { + return fmt.Errorf("unimplemented") +} + +func (eic execInContainer) Wait() error { + return fmt.Errorf("unimplemented") +} + +func (eic execInContainer) StdoutPipe() (io.ReadCloser, error) { + return nil, fmt.Errorf("unimplemented") +} + +func (eic execInContainer) StderrPipe() (io.ReadCloser, error) { + return nil, fmt.Errorf("unimplemented") +} diff --git a/pkg/kubelet/remote/remote_runtime.go b/pkg/kubelet/remote/remote_runtime.go index 16e16daff84..88b9bce9530 100644 --- a/pkg/kubelet/remote/remote_runtime.go +++ b/pkg/kubelet/remote/remote_runtime.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "strings" + "sync" "time" "google.golang.org/grpc" @@ -36,8 +37,18 @@ import ( type RemoteRuntimeService struct { timeout time.Duration runtimeClient runtimeapi.RuntimeServiceClient + // Cache last per-container error message to reduce log spam + lastError map[string]string + // Time last per-container error message was printed + errorPrinted map[string]time.Time + errorMapLock sync.Mutex } +const ( + // How frequently to report identical errors + identicalErrorDelay = 1 * time.Minute +) + // NewRemoteRuntimeService creates a new internalapi.RuntimeService. 
func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration) (internalapi.RuntimeService, error) { klog.V(3).Infof("Connecting to runtime service %s", endpoint) @@ -57,6 +68,8 @@ func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration) ( return &RemoteRuntimeService{ timeout: connectionTimeout, runtimeClient: runtimeapi.NewRuntimeServiceClient(conn), + lastError: make(map[string]string), + errorPrinted: make(map[string]time.Time), }, nil } @@ -225,6 +238,10 @@ func (r *RemoteRuntimeService) StopContainer(containerID string, timeout int64) ctx, cancel := getContextWithTimeout(t) defer cancel() + r.errorMapLock.Lock() + delete(r.lastError, containerID) + delete(r.errorPrinted, containerID) + r.errorMapLock.Unlock() _, err := r.runtimeClient.StopContainer(ctx, &runtimeapi.StopContainerRequest{ ContainerId: containerID, Timeout: timeout, @@ -243,6 +260,10 @@ func (r *RemoteRuntimeService) RemoveContainer(containerID string) error { ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() + r.errorMapLock.Lock() + delete(r.lastError, containerID) + delete(r.errorPrinted, containerID) + r.errorMapLock.Unlock() _, err := r.runtimeClient.RemoveContainer(ctx, &runtimeapi.RemoveContainerRequest{ ContainerId: containerID, }) @@ -270,6 +291,18 @@ func (r *RemoteRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter return resp.Containers, nil } +// Clean up any expired last-error timers +func (r *RemoteRuntimeService) cleanupErrorTimeouts() { + r.errorMapLock.Lock() + defer r.errorMapLock.Unlock() + for ID, timeout := range r.errorPrinted { + if time.Now().Sub(timeout) >= identicalErrorDelay { + delete(r.lastError, ID) + delete(r.errorPrinted, ID) + } + } +} + // ContainerStatus returns the container status. 
func (r *RemoteRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) { ctx, cancel := getContextWithTimeout(r.timeout) @@ -278,10 +311,21 @@ func (r *RemoteRuntimeService) ContainerStatus(containerID string) (*runtimeapi. resp, err := r.runtimeClient.ContainerStatus(ctx, &runtimeapi.ContainerStatusRequest{ ContainerId: containerID, }) + r.cleanupErrorTimeouts() + r.errorMapLock.Lock() + defer r.errorMapLock.Unlock() if err != nil { - klog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) + // Don't spam the log with endless messages about the same failure. + lastMsg, ok := r.lastError[containerID] + if !ok || err.Error() != lastMsg || time.Now().Sub(r.errorPrinted[containerID]) >= identicalErrorDelay { + klog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) + r.errorPrinted[containerID] = time.Now() + r.lastError[containerID] = err.Error() + } return nil, err } + delete(r.lastError, containerID) + delete(r.errorPrinted, containerID) if resp.Status != nil { if err := verifyContainerStatus(resp.Status); err != nil { @@ -456,10 +500,20 @@ func (r *RemoteRuntimeService) ContainerStats(containerID string) (*runtimeapi.C resp, err := r.runtimeClient.ContainerStats(ctx, &runtimeapi.ContainerStatsRequest{ ContainerId: containerID, }) + r.cleanupErrorTimeouts() + r.errorMapLock.Lock() + defer r.errorMapLock.Unlock() if err != nil { - klog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) + lastMsg, ok := r.lastError[containerID] + if !ok || err.Error() != lastMsg || time.Now().Sub(r.errorPrinted[containerID]) >= identicalErrorDelay { + klog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) + r.errorPrinted[containerID] = time.Now() + r.lastError[containerID] = err.Error() + } return nil, err } + delete(r.lastError, containerID) + delete(r.errorPrinted, containerID) return resp.GetStats(), nil } diff --git 
a/pkg/kubelet/server/stats/BUILD b/pkg/kubelet/server/stats/BUILD index 6fec6f2eb99..dde9b3cbc7d 100644 --- a/pkg/kubelet/server/stats/BUILD +++ b/pkg/kubelet/server/stats/BUILD @@ -18,6 +18,7 @@ go_library( "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/util:go_default_library", "//pkg/kubelet/util/format:go_default_library", "//pkg/volume:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/kubelet/server/stats/summary.go b/pkg/kubelet/server/stats/summary.go index fb646c5d2f3..93aef69ed87 100644 --- a/pkg/kubelet/server/stats/summary.go +++ b/pkg/kubelet/server/stats/summary.go @@ -19,7 +19,11 @@ package stats import ( "fmt" + "k8s.io/klog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/util" ) type SummaryProvider interface { @@ -32,6 +36,11 @@ type SummaryProvider interface { // summaryProviderImpl implements the SummaryProvider interface. type summaryProviderImpl struct { + // kubeletCreationTime is the time at which the summaryProvider was created. + kubeletCreationTime metav1.Time + // systemBootTime is the time at which the system was started + systemBootTime metav1.Time + provider StatsProvider } @@ -40,7 +49,18 @@ var _ SummaryProvider = &summaryProviderImpl{} // NewSummaryProvider returns a SummaryProvider using the stats provided by the // specified statsProvider. func NewSummaryProvider(statsProvider StatsProvider) SummaryProvider { - return &summaryProviderImpl{statsProvider} + kubeletCreationTime := metav1.Now() + bootTime, err := util.GetBootTime() + if err != nil { + // bootTime will be zero if we encounter an error getting the boot time. + klog.Warningf("Error getting system boot time. 
Node metrics will have an incorrect start time: %v", err) + } + + return &summaryProviderImpl{ + kubeletCreationTime: kubeletCreationTime, + systemBootTime: metav1.NewTime(bootTime), + provider: statsProvider, + } } func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) { @@ -77,7 +97,7 @@ func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) CPU: rootStats.CPU, Memory: rootStats.Memory, Network: networkStats, - StartTime: rootStats.StartTime, + StartTime: sp.systemBootTime, Fs: rootFsStats, Runtime: &statsapi.RuntimeStats{ImageFs: imageFsStats}, Rlimit: rlimit, diff --git a/pkg/kubelet/server/stats/summary_sys_containers.go b/pkg/kubelet/server/stats/summary_sys_containers.go index baaff0ab1bd..4526f2a7695 100644 --- a/pkg/kubelet/server/stats/summary_sys_containers.go +++ b/pkg/kubelet/server/stats/summary_sys_containers.go @@ -21,6 +21,7 @@ package stats import ( "k8s.io/klog" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cm" ) @@ -29,11 +30,12 @@ func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig systemContainers := map[string]struct { name string forceStatsUpdate bool + startTime metav1.Time }{ - statsapi.SystemContainerKubelet: {nodeConfig.KubeletCgroupsName, false}, - statsapi.SystemContainerRuntime: {nodeConfig.RuntimeCgroupsName, false}, - statsapi.SystemContainerMisc: {nodeConfig.SystemCgroupsName, false}, - statsapi.SystemContainerPods: {sp.provider.GetPodCgroupRoot(), updateStats}, + statsapi.SystemContainerKubelet: {name: nodeConfig.KubeletCgroupsName, forceStatsUpdate: false, startTime: sp.kubeletCreationTime}, + statsapi.SystemContainerRuntime: {name: nodeConfig.RuntimeCgroupsName, forceStatsUpdate: false}, + statsapi.SystemContainerMisc: {name: nodeConfig.SystemCgroupsName, forceStatsUpdate: false}, + statsapi.SystemContainerPods: {name: sp.provider.GetPodCgroupRoot(), 
forceStatsUpdate: updateStats}, } for sys, cont := range systemContainers { // skip if cgroup name is undefined (not all system containers are required) @@ -48,6 +50,11 @@ func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig // System containers don't have a filesystem associated with them. s.Logs, s.Rootfs = nil, nil s.Name = sys + + // if we know the start time of a system container, use that instead of the start time provided by cAdvisor + if !cont.startTime.IsZero() { + s.StartTime = cont.startTime + } stats = append(stats, *s) } @@ -58,11 +65,12 @@ func (sp *summaryProviderImpl) GetSystemContainersCPUAndMemoryStats(nodeConfig c systemContainers := map[string]struct { name string forceStatsUpdate bool + startTime metav1.Time }{ - statsapi.SystemContainerKubelet: {nodeConfig.KubeletCgroupsName, false}, - statsapi.SystemContainerRuntime: {nodeConfig.RuntimeCgroupsName, false}, - statsapi.SystemContainerMisc: {nodeConfig.SystemCgroupsName, false}, - statsapi.SystemContainerPods: {sp.provider.GetPodCgroupRoot(), updateStats}, + statsapi.SystemContainerKubelet: {name: nodeConfig.KubeletCgroupsName, forceStatsUpdate: false, startTime: sp.kubeletCreationTime}, + statsapi.SystemContainerRuntime: {name: nodeConfig.RuntimeCgroupsName, forceStatsUpdate: false}, + statsapi.SystemContainerMisc: {name: nodeConfig.SystemCgroupsName, forceStatsUpdate: false}, + statsapi.SystemContainerPods: {name: sp.provider.GetPodCgroupRoot(), forceStatsUpdate: updateStats}, } for sys, cont := range systemContainers { // skip if cgroup name is undefined (not all system containers are required) @@ -75,6 +83,11 @@ func (sp *summaryProviderImpl) GetSystemContainersCPUAndMemoryStats(nodeConfig c continue } s.Name = sys + + // if we know the start time of a system container, use that instead of the start time provided by cAdvisor + if !cont.startTime.IsZero() { + s.StartTime = cont.startTime + } stats = append(stats, *s) } diff --git 
a/pkg/kubelet/server/stats/summary_test.go b/pkg/kubelet/server/stats/summary_test.go index d210298f59f..081036855a2 100644 --- a/pkg/kubelet/server/stats/summary_test.go +++ b/pkg/kubelet/server/stats/summary_test.go @@ -83,12 +83,14 @@ func TestSummaryProviderGetStats(t *testing.T) { On("GetCgroupStats", "/kubelet", false).Return(cgroupStatsMap["/kubelet"].cs, cgroupStatsMap["/kubelet"].ns, nil). On("GetCgroupStats", "/kubepods", true).Return(cgroupStatsMap["/pods"].cs, cgroupStatsMap["/pods"].ns, nil) - provider := NewSummaryProvider(mockStatsProvider) + kubeletCreationTime := metav1.Now() + systemBootTime := metav1.Now() + provider := summaryProviderImpl{kubeletCreationTime: kubeletCreationTime, systemBootTime: systemBootTime, provider: mockStatsProvider} summary, err := provider.Get(true) assert.NoError(err) assert.Equal(summary.Node.NodeName, "test-node") - assert.Equal(summary.Node.StartTime, cgroupStatsMap["/"].cs.StartTime) + assert.Equal(summary.Node.StartTime, systemBootTime) assert.Equal(summary.Node.CPU, cgroupStatsMap["/"].cs.CPU) assert.Equal(summary.Node.Memory, cgroupStatsMap["/"].cs.Memory) assert.Equal(summary.Node.Network, cgroupStatsMap["/"].ns) @@ -98,7 +100,7 @@ func TestSummaryProviderGetStats(t *testing.T) { assert.Equal(len(summary.Node.SystemContainers), 4) assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{ Name: "kubelet", - StartTime: cgroupStatsMap["/kubelet"].cs.StartTime, + StartTime: kubeletCreationTime, CPU: cgroupStatsMap["/kubelet"].cs.CPU, Memory: cgroupStatsMap["/kubelet"].cs.Memory, Accelerators: cgroupStatsMap["/kubelet"].cs.Accelerators, diff --git a/pkg/kubelet/stats/BUILD b/pkg/kubelet/stats/BUILD index 07ca1b54540..ca3efe32f00 100644 --- a/pkg/kubelet/stats/BUILD +++ b/pkg/kubelet/stats/BUILD @@ -24,6 +24,7 @@ go_library( "//pkg/kubelet/leaky:go_default_library", "//pkg/kubelet/pod:go_default_library", "//pkg/kubelet/server/stats:go_default_library", + "//pkg/kubelet/status:go_default_library", 
"//pkg/kubelet/types:go_default_library", "//pkg/volume:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -71,8 +72,10 @@ go_test( "//pkg/kubelet/leaky:go_default_library", "//pkg/kubelet/pod/testing:go_default_library", "//pkg/kubelet/server/stats:go_default_library", + "//pkg/kubelet/status/testing:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/volume:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/kubelet/stats/cadvisor_stats_provider.go b/pkg/kubelet/stats/cadvisor_stats_provider.go index 9549c12a1a6..a8abfb26646 100644 --- a/pkg/kubelet/stats/cadvisor_stats_provider.go +++ b/pkg/kubelet/stats/cadvisor_stats_provider.go @@ -33,6 +33,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/leaky" "k8s.io/kubernetes/pkg/kubelet/server/stats" + "k8s.io/kubernetes/pkg/kubelet/status" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -47,6 +48,8 @@ type cadvisorStatsProvider struct { resourceAnalyzer stats.ResourceAnalyzer // imageService is used to get the stats of the image filesystem. 
imageService kubecontainer.ImageService + // statusProvider is used to get pod metadata + statusProvider status.PodStatusProvider } // newCadvisorStatsProvider returns a containerStatsProvider that provides @@ -55,11 +58,13 @@ func newCadvisorStatsProvider( cadvisor cadvisor.Interface, resourceAnalyzer stats.ResourceAnalyzer, imageService kubecontainer.ImageService, + statusProvider status.PodStatusProvider, ) containerStatsProvider { return &cadvisorStatsProvider{ cadvisor: cadvisor, resourceAnalyzer: resourceAnalyzer, imageService: imageService, + statusProvider: statusProvider, } } @@ -114,7 +119,6 @@ func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // Special case for infrastructure container which is hidden from // the user and has network stats. podStats.Network = cadvisorInfoToNetworkStats("pod:"+ref.Namespace+"_"+ref.Name, &cinfo) - podStats.StartTime = metav1.NewTime(cinfo.Spec.CreationTime) } else { podStats.Containers = append(podStats.Containers, *cadvisorInfoToContainerStats(containerName, &cinfo, &rootFsInfo, &imageFsInfo)) } @@ -139,7 +143,13 @@ func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { podStats.CPU = cpu podStats.Memory = memory } - result = append(result, *podStats) + + status, found := p.statusProvider.GetPodStatus(podUID) + if found && status.StartTime != nil && !status.StartTime.IsZero() { + podStats.StartTime = *status.StartTime + // only append stats if we were able to get the start time of the pod + result = append(result, *podStats) + } } return result, nil diff --git a/pkg/kubelet/stats/cadvisor_stats_provider_test.go b/pkg/kubelet/stats/cadvisor_stats_provider_test.go index 68a41acb1b9..9c4b6ddd78a 100644 --- a/pkg/kubelet/stats/cadvisor_stats_provider_test.go +++ b/pkg/kubelet/stats/cadvisor_stats_provider_test.go @@ -22,12 +22,16 @@ import ( cadvisorapiv2 "github.com/google/cadvisor/info/v2" "github.com/stretchr/testify/assert" + "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" "k8s.io/kubernetes/pkg/kubelet/leaky" serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats" + statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" ) func TestRemoveTerminatedContainerInfo(t *testing.T) { @@ -196,10 +200,17 @@ func TestCadvisorListPodStats(t *testing.T) { EphemeralVolumes: ephemeralVolumes, PersistentVolumes: persistentVolumes, } + p0Time := metav1.Now() + p1Time := metav1.Now() + p2Time := metav1.Now() + mockStatus := new(statustest.MockStatusProvider) + mockStatus.On("GetPodStatus", types.UID("UID"+pName0)).Return(v1.PodStatus{StartTime: &p0Time}, true) + mockStatus.On("GetPodStatus", types.UID("UID"+pName1)).Return(v1.PodStatus{StartTime: &p1Time}, true) + mockStatus.On("GetPodStatus", types.UID("UID"+pName2)).Return(v1.PodStatus{StartTime: &p2Time}, true) resourceAnalyzer := &fakeResourceAnalyzer{podVolumeStats: volumeStats} - p := NewCadvisorStatsProvider(mockCadvisor, resourceAnalyzer, nil, nil, mockRuntime) + p := NewCadvisorStatsProvider(mockCadvisor, resourceAnalyzer, nil, nil, mockRuntime, mockStatus) pods, err := p.ListPodStats() assert.NoError(t, err) @@ -227,7 +238,7 @@ func TestCadvisorListPodStats(t *testing.T) { checkCPUStats(t, "Pod0Container1", seedPod0Container1, con.CPU) checkMemoryStats(t, "Pod0Container1", seedPod0Container1, infos["/pod0-c1"], con.Memory) - assert.EqualValues(t, testTime(creationTime, seedPod0Infra).Unix(), ps.StartTime.Time.Unix()) + assert.EqualValues(t, p0Time.Unix(), ps.StartTime.Time.Unix()) checkNetworkStats(t, "Pod0", seedPod0Infra, ps.Network) checkEphemeralStats(t, "Pod0", []int{seedPod0Container0, seedPod0Container1}, []int{seedEphemeralVolume1, seedEphemeralVolume2}, 
ps.EphemeralStorage) if ps.CPU != nil { @@ -349,7 +360,7 @@ func TestCadvisorListPodCPUAndMemoryStats(t *testing.T) { resourceAnalyzer := &fakeResourceAnalyzer{podVolumeStats: volumeStats} - p := NewCadvisorStatsProvider(mockCadvisor, resourceAnalyzer, nil, nil, nil) + p := NewCadvisorStatsProvider(mockCadvisor, resourceAnalyzer, nil, nil, nil, nil) pods, err := p.ListPodCPUAndMemoryStats() assert.NoError(t, err) @@ -435,7 +446,7 @@ func TestCadvisorImagesFsStats(t *testing.T) { mockCadvisor.On("ImagesFsInfo").Return(imageFsInfo, nil) mockRuntime.On("ImageStats").Return(imageStats, nil) - provider := newCadvisorStatsProvider(mockCadvisor, &fakeResourceAnalyzer{}, mockRuntime) + provider := newCadvisorStatsProvider(mockCadvisor, &fakeResourceAnalyzer{}, mockRuntime, nil) stats, err := provider.ImageFsStats() assert.NoError(err) diff --git a/pkg/kubelet/stats/stats_provider.go b/pkg/kubelet/stats/stats_provider.go index 903f8678a64..b06c7a8ad8a 100644 --- a/pkg/kubelet/stats/stats_provider.go +++ b/pkg/kubelet/stats/stats_provider.go @@ -28,6 +28,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" "k8s.io/kubernetes/pkg/kubelet/server/stats" + "k8s.io/kubernetes/pkg/kubelet/status" ) // NewCRIStatsProvider returns a StatsProvider that provides the node stats @@ -52,8 +53,9 @@ func NewCadvisorStatsProvider( podManager kubepod.Manager, runtimeCache kubecontainer.RuntimeCache, imageService kubecontainer.ImageService, + statusProvider status.PodStatusProvider, ) *StatsProvider { - return newStatsProvider(cadvisor, podManager, runtimeCache, newCadvisorStatsProvider(cadvisor, resourceAnalyzer, imageService)) + return newStatsProvider(cadvisor, podManager, runtimeCache, newCadvisorStatsProvider(cadvisor, resourceAnalyzer, imageService, statusProvider)) } // newStatsProvider returns a new StatsProvider that provides node stats from diff --git a/pkg/kubelet/status/testing/BUILD b/pkg/kubelet/status/testing/BUILD 
index 4bc79aca4ec..c335925ad29 100644 --- a/pkg/kubelet/status/testing/BUILD +++ b/pkg/kubelet/status/testing/BUILD @@ -7,9 +7,16 @@ load( go_library( name = "go_default_library", - srcs = ["fake_pod_deletion_safety.go"], + srcs = [ + "fake_pod_deletion_safety.go", + "mock_pod_status_provider.go", + ], importpath = "k8s.io/kubernetes/pkg/kubelet/status/testing", - deps = ["//staging/src/k8s.io/api/core/v1:go_default_library"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/github.com/stretchr/testify/mock:go_default_library", + ], ) filegroup( diff --git a/pkg/apis/core/v1/main_test.go b/pkg/kubelet/status/testing/mock_pod_status_provider.go similarity index 65% rename from pkg/apis/core/v1/main_test.go rename to pkg/kubelet/status/testing/mock_pod_status_provider.go index e46b01929f7..cef85222ada 100644 --- a/pkg/apis/core/v1/main_test.go +++ b/pkg/kubelet/status/testing/mock_pod_status_provider.go @@ -14,16 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1 +package testing import ( - "testing" + "github.com/stretchr/testify/mock" - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" ) -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) +type MockStatusProvider struct { + mock.Mock +} + +func (m *MockStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool) { + args := m.Called(uid) + return args.Get(0).(v1.PodStatus), args.Bool(1) } diff --git a/pkg/kubelet/types/BUILD b/pkg/kubelet/types/BUILD index b0fceedbbbd..cd0d4d8f448 100644 --- a/pkg/kubelet/types/BUILD +++ b/pkg/kubelet/types/BUILD @@ -32,7 +32,6 @@ go_test( name = "go_default_test", srcs = [ "labels_test.go", - "main_test.go", "pod_status_test.go", "pod_update_test.go", "types_test.go", diff --git a/pkg/kubelet/types/main_test.go b/pkg/kubelet/types/main_test.go deleted file mode 100644 index 928b1122930..00000000000 --- a/pkg/kubelet/types/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/kubelet/util/BUILD b/pkg/kubelet/util/BUILD index 4165864e0a3..7b590024861 100644 --- a/pkg/kubelet/util/BUILD +++ b/pkg/kubelet/util/BUILD @@ -34,6 +34,8 @@ go_test( go_library( name = "go_default_library", srcs = [ + "boottime_util_darwin.go", + "boottime_util_linux.go", "doc.go", "util.go", "util_unix.go", diff --git a/pkg/kubelet/util/boottime_util_darwin.go b/pkg/kubelet/util/boottime_util_darwin.go new file mode 100644 index 00000000000..09d3b8865da --- /dev/null +++ b/pkg/kubelet/util/boottime_util_darwin.go @@ -0,0 +1,44 @@ +// +build darwin + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// GetBootTime returns the time at which the machine was started, truncated to the nearest second +func GetBootTime() (time.Time, error) { + output, err := unix.SysctlRaw("kern.boottime") + if err != nil { + return time.Time{}, err + } + var timeval syscall.Timeval + if len(output) != int(unsafe.Sizeof(timeval)) { + return time.Time{}, fmt.Errorf("unexpected output when calling syscall kern.bootime. 
Expected len(output) to be %v, but got %v", + int(unsafe.Sizeof(timeval)), len(output)) + } + timeval = *(*syscall.Timeval)(unsafe.Pointer(&output[0])) + sec, nsec := timeval.Unix() + return time.Unix(sec, nsec).Truncate(time.Second), nil +} diff --git a/pkg/apis/storage/util/main_test.go b/pkg/kubelet/util/boottime_util_linux.go similarity index 56% rename from pkg/apis/storage/util/main_test.go rename to pkg/kubelet/util/boottime_util_linux.go index 6af02d0a11d..f00e7c06bfa 100644 --- a/pkg/apis/storage/util/main_test.go +++ b/pkg/kubelet/util/boottime_util_linux.go @@ -1,3 +1,5 @@ +// +build freebsd linux + /* Copyright 2018 The Kubernetes Authors. @@ -17,13 +19,18 @@ limitations under the License. package util import ( - "testing" + "fmt" + "time" - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" + "golang.org/x/sys/unix" ) -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) +// GetBootTime returns the time at which the machine was started, truncated to the nearest second +func GetBootTime() (time.Time, error) { + currentTime := time.Now() + var info unix.Sysinfo_t + if err := unix.Sysinfo(&info); err != nil { + return time.Time{}, fmt.Errorf("error getting system uptime: %s", err) + } + return currentTime.Add(-time.Duration(info.Uptime) * time.Second).Truncate(time.Second), nil } diff --git a/pkg/kubelet/util/pluginwatcher/example_handler.go b/pkg/kubelet/util/pluginwatcher/example_handler.go index fc14dfe9559..d4fa2f5a029 100644 --- a/pkg/kubelet/util/pluginwatcher/example_handler.go +++ b/pkg/kubelet/util/pluginwatcher/example_handler.go @@ -38,6 +38,8 @@ type exampleHandler struct { m sync.Mutex count int + + permitDeprecatedDir bool } type examplePluginEvent int @@ -50,16 +52,21 @@ const ( ) // NewExampleHandler provide a example handler -func NewExampleHandler(supportedVersions []string) 
*exampleHandler { +func NewExampleHandler(supportedVersions []string, permitDeprecatedDir bool) *exampleHandler { return &exampleHandler{ SupportedVersions: supportedVersions, ExpectedNames: make(map[string]int), - eventChans: make(map[string]chan examplePluginEvent), + eventChans: make(map[string]chan examplePluginEvent), + permitDeprecatedDir: permitDeprecatedDir, } } -func (p *exampleHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error { +func (p *exampleHandler) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error { + if foundInDeprecatedDir && !p.permitDeprecatedDir { + return fmt.Errorf("device plugin socket was found in a directory that is no longer supported and this test does not permit plugins from deprecated dir") + } + p.SendEvent(pluginName, exampleEventValidate) n, ok := p.DecreasePluginCount(pluginName) @@ -79,7 +86,7 @@ func (p *exampleHandler) ValidatePlugin(pluginName string, endpoint string, vers return nil } -func (p *exampleHandler) RegisterPlugin(pluginName, endpoint string) error { +func (p *exampleHandler) RegisterPlugin(pluginName, endpoint string, versions []string) error { p.SendEvent(pluginName, exampleEventRegister) // Verifies the grpcServer is ready to serve services. diff --git a/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.proto b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.proto index 14aa7df2c4d..7f6cf218354 100644 --- a/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.proto +++ b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.proto @@ -1,3 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + syntax = 'proto3'; package v1beta1; diff --git a/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.proto b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.proto index e34697f3a66..62760647cd6 100644 --- a/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.proto +++ b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.proto @@ -1,3 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + syntax = 'proto3'; package v1beta2; diff --git a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go index 06076c6ffa1..a9f8422edf0 100644 --- a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go +++ b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go @@ -20,6 +20,7 @@ import ( "fmt" "net" "os" + "path/filepath" "strings" "sync" "time" @@ -36,11 +37,12 @@ import ( // Watcher is the plugin watcher type Watcher struct { - path string - stopCh chan interface{} - fs utilfs.Filesystem - fsWatcher *fsnotify.Watcher - wg sync.WaitGroup + path string + deprecatedPath string + stopCh chan interface{} + fs utilfs.Filesystem + fsWatcher *fsnotify.Watcher + wg sync.WaitGroup mutex sync.Mutex handlers map[string]PluginHandler @@ -54,10 +56,13 @@ type pathInfo struct { } // NewWatcher provides a new watcher -func NewWatcher(sockDir string) *Watcher { +// deprecatedSockDir refers to a pre-GA directory that was used by older plugins +// for socket registration. New plugins should not use this directory. +func NewWatcher(sockDir string, deprecatedSockDir string) *Watcher { return &Watcher{ - path: sockDir, - fs: &utilfs.DefaultFs{}, + path: sockDir, + deprecatedPath: deprecatedSockDir, + fs: &utilfs.DefaultFs{}, handlers: make(map[string]PluginHandler), plugins: make(map[string]pathInfo), @@ -106,7 +111,7 @@ func (w *Watcher) Start() error { //TODO: Handle errors by taking corrective measures w.wg.Add(1) - go func() { + func() { defer w.wg.Done() if event.Op&fsnotify.Create == fsnotify.Create { @@ -137,7 +142,15 @@ func (w *Watcher) Start() error { // Traverse plugin dir after starting the plugin processing goroutine if err := w.traversePluginDir(w.path); err != nil { w.Stop() - return fmt.Errorf("failed to traverse plugin socket path, err: %v", err) + return fmt.Errorf("failed to traverse plugin socket path %q, err: %v", w.path, err) + } + + // Traverse deprecated plugin dir, if specified. 
+ if len(w.deprecatedPath) != 0 { + if err := w.traversePluginDir(w.deprecatedPath); err != nil { + w.Stop() + return fmt.Errorf("failed to traverse deprecated plugin socket path %q, err: %v", w.deprecatedPath, err) + } } return nil @@ -190,6 +203,10 @@ func (w *Watcher) traversePluginDir(dir string) error { switch mode := info.Mode(); { case mode.IsDir(): + if w.containsBlacklistedDir(path) { + return filepath.SkipDir + } + if err := w.fsWatcher.Add(path); err != nil { return fmt.Errorf("failed to watch %s, err: %v", path, err) } @@ -216,6 +233,10 @@ func (w *Watcher) traversePluginDir(dir string) error { func (w *Watcher) handleCreateEvent(event fsnotify.Event) error { klog.V(6).Infof("Handling create event: %v", event) + if w.containsBlacklistedDir(event.Name) { + return nil + } + fi, err := os.Stat(event.Name) if err != nil { return fmt.Errorf("stat file %s failed: %v", event.Name, err) @@ -271,8 +292,10 @@ func (w *Watcher) handlePluginRegistration(socketPath string) error { infoResp.Endpoint = socketPath } + foundInDeprecatedDir := w.foundInDeprecatedDir(socketPath) + // calls handler callback to verify registration request - if err := handler.ValidatePlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions); err != nil { + if err := handler.ValidatePlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions, foundInDeprecatedDir); err != nil { return w.notifyPlugin(client, false, fmt.Sprintf("plugin validation failed with err: %v", err)) } @@ -280,7 +303,7 @@ func (w *Watcher) handlePluginRegistration(socketPath string) error { // so that if we receive a delete event during Register Plugin, we can process it as a DeRegister call. 
w.registerPlugin(socketPath, infoResp.Type, infoResp.Name) - if err := handler.RegisterPlugin(infoResp.Name, infoResp.Endpoint); err != nil { + if err := handler.RegisterPlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions); err != nil { return w.notifyPlugin(client, false, fmt.Sprintf("plugin registration failed with err: %v", err)) } @@ -417,3 +440,27 @@ func dial(unixSocketPath string, timeout time.Duration) (registerapi.Registratio return registerapi.NewRegistrationClient(c), c, nil } + +// While deprecated dir is supported, to add extra protection around #69015 +// we will explicitly blacklist kubernetes.io directory. +func (w *Watcher) containsBlacklistedDir(path string) bool { + return strings.HasPrefix(path, w.deprecatedPath+"/kubernetes.io/") || + path == w.deprecatedPath+"/kubernetes.io" +} + +func (w *Watcher) foundInDeprecatedDir(socketPath string) bool { + if len(w.deprecatedPath) != 0 { + if socketPath == w.deprecatedPath { + return true + } + + deprecatedPath := w.deprecatedPath + if !strings.HasSuffix(deprecatedPath, "/") { + deprecatedPath = deprecatedPath + "/" + } + if strings.HasPrefix(socketPath, deprecatedPath) { + return true + } + } + return false +} diff --git a/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go b/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go index 05dac25ac80..a7f449c141b 100644 --- a/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go +++ b/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go @@ -32,7 +32,8 @@ import ( ) var ( - socketDir string + socketDir string + deprecatedSocketDir string supportedVersions = []string{"v1beta1", "v1beta2"} ) @@ -50,19 +51,27 @@ func init() { panic(fmt.Sprintf("Could not create a temp directory: %s", d)) } + d2, err := ioutil.TempDir("", "deprecated_plugin_test") + if err != nil { + panic(fmt.Sprintf("Could not create a temp directory: %s", d)) + } + socketDir = d + deprecatedSocketDir = d2 } func cleanup(t *testing.T) { require.NoError(t, 
os.RemoveAll(socketDir)) + require.NoError(t, os.RemoveAll(deprecatedSocketDir)) os.MkdirAll(socketDir, 0755) + os.MkdirAll(deprecatedSocketDir, 0755) } func TestPluginRegistration(t *testing.T) { defer cleanup(t) - hdlr := NewExampleHandler(supportedVersions) - w := newWatcherWithHandler(t, hdlr) + hdlr := NewExampleHandler(supportedVersions, false /* permitDeprecatedDir */) + w := newWatcherWithHandler(t, hdlr, false /* testDeprecatedDir */) defer func() { require.NoError(t, w.Stop()) }() for i := 0; i < 10; i++ { @@ -84,13 +93,40 @@ func TestPluginRegistration(t *testing.T) { } } +func TestPluginRegistrationDeprecated(t *testing.T) { + defer cleanup(t) + + hdlr := NewExampleHandler(supportedVersions, true /* permitDeprecatedDir */) + w := newWatcherWithHandler(t, hdlr, true /* testDeprecatedDir */) + defer func() { require.NoError(t, w.Stop()) }() + + // Test plugins in deprecated dir + for i := 0; i < 10; i++ { + endpoint := fmt.Sprintf("%s/dep-plugin-%d.sock", deprecatedSocketDir, i) + pluginName := fmt.Sprintf("dep-example-plugin-%d", i) + + hdlr.AddPluginName(pluginName) + + p := NewTestExamplePlugin(pluginName, registerapi.DevicePlugin, endpoint, supportedVersions...) 
+ require.NoError(t, p.Serve("v1beta1", "v1beta2")) + + require.True(t, waitForEvent(t, exampleEventValidate, hdlr.EventChan(p.pluginName))) + require.True(t, waitForEvent(t, exampleEventRegister, hdlr.EventChan(p.pluginName))) + + require.True(t, waitForPluginRegistrationStatus(t, p.registrationStatus)) + + require.NoError(t, p.Stop()) + require.True(t, waitForEvent(t, exampleEventDeRegister, hdlr.EventChan(p.pluginName))) + } +} + func TestPluginReRegistration(t *testing.T) { defer cleanup(t) pluginName := fmt.Sprintf("example-plugin") - hdlr := NewExampleHandler(supportedVersions) + hdlr := NewExampleHandler(supportedVersions, false /* permitDeprecatedDir */) - w := newWatcherWithHandler(t, hdlr) + w := newWatcherWithHandler(t, hdlr, false /* testDeprecatedDir */) defer func() { require.NoError(t, w.Stop()) }() plugins := make([]*examplePlugin, 10) @@ -122,7 +158,7 @@ func TestPluginReRegistration(t *testing.T) { func TestPluginRegistrationAtKubeletStart(t *testing.T) { defer cleanup(t) - hdlr := NewExampleHandler(supportedVersions) + hdlr := NewExampleHandler(supportedVersions, false /* permitDeprecatedDir */) plugins := make([]*examplePlugin, 10) for i := 0; i < len(plugins); i++ { @@ -137,7 +173,7 @@ func TestPluginRegistrationAtKubeletStart(t *testing.T) { plugins[i] = p } - w := newWatcherWithHandler(t, hdlr) + w := newWatcherWithHandler(t, hdlr, false /* testDeprecatedDir */) defer func() { require.NoError(t, w.Stop()) }() var wg sync.WaitGroup @@ -173,10 +209,10 @@ func TestPluginRegistrationFailureWithUnsupportedVersion(t *testing.T) { pluginName := fmt.Sprintf("example-plugin") socketPath := socketDir + "/plugin.sock" - hdlr := NewExampleHandler(supportedVersions) + hdlr := NewExampleHandler(supportedVersions, false /* permitDeprecatedDir */) hdlr.AddPluginName(pluginName) - w := newWatcherWithHandler(t, hdlr) + w := newWatcherWithHandler(t, hdlr, false /* testDeprecatedDir */) defer func() { require.NoError(t, w.Stop()) }() // Advertise v1beta3 but 
don't serve anything else than the plugin service @@ -199,10 +235,10 @@ func TestPlugiRegistrationFailureWithUnsupportedVersionAtKubeletStart(t *testing require.NoError(t, p.Serve()) defer func() { require.NoError(t, p.Stop()) }() - hdlr := NewExampleHandler(supportedVersions) + hdlr := NewExampleHandler(supportedVersions, false /* permitDeprecatedDir */) hdlr.AddPluginName(pluginName) - w := newWatcherWithHandler(t, hdlr) + w := newWatcherWithHandler(t, hdlr, false /* testDeprecatedDir */) defer func() { require.NoError(t, w.Stop()) }() require.True(t, waitForEvent(t, exampleEventValidate, hdlr.EventChan(p.pluginName))) @@ -230,11 +266,215 @@ func waitForEvent(t *testing.T, expected examplePluginEvent, eventChan chan exam return false } -func newWatcherWithHandler(t *testing.T, hdlr PluginHandler) *Watcher { - w := NewWatcher(socketDir) +func newWatcherWithHandler(t *testing.T, hdlr PluginHandler, testDeprecatedDir bool) *Watcher { + depSocketDir := "" + if testDeprecatedDir { + depSocketDir = deprecatedSocketDir + } + w := NewWatcher(socketDir, depSocketDir) w.AddHandler(registerapi.DevicePlugin, hdlr) require.NoError(t, w.Start()) return w } + +func TestFoundInDeprecatedDir(t *testing.T) { + testCases := []struct { + sockDir string + deprecatedSockDir string + socketPath string + expectFoundInDeprecatedDir bool + }{ + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + socketPath: "/var/lib/kubelet/plugins_registry/mydriver.foo/csi.sock", + expectFoundInDeprecatedDir: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + socketPath: "/var/lib/kubelet/plugins/mydriver.foo/csi.sock", + expectFoundInDeprecatedDir: true, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + socketPath: "/var/lib/kubelet/plugins_registry", + expectFoundInDeprecatedDir: false, + }, + { + sockDir: 
"/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + socketPath: "/var/lib/kubelet/plugins", + expectFoundInDeprecatedDir: true, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + socketPath: "/var/lib/kubelet/plugins/kubernetes.io", + expectFoundInDeprecatedDir: true, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + socketPath: "/var/lib/kubelet/plugins/my.driver.com", + expectFoundInDeprecatedDir: true, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + socketPath: "/var/lib/kubelet/plugins_registry", + expectFoundInDeprecatedDir: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + socketPath: "/var/lib/kubelet/plugins_registry/kubernetes.io", + expectFoundInDeprecatedDir: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + socketPath: "/var/lib/kubelet/plugins_registry/my.driver.com", + expectFoundInDeprecatedDir: false, + }, + } + + for _, tc := range testCases { + // Arrange & Act + watcher := NewWatcher(tc.sockDir, tc.deprecatedSockDir) + + actualFoundInDeprecatedDir := watcher.foundInDeprecatedDir(tc.socketPath) + + // Assert + if tc.expectFoundInDeprecatedDir != actualFoundInDeprecatedDir { + t.Fatalf("expecting actualFoundInDeprecatedDir=%v, but got %v for testcase: %#v", tc.expectFoundInDeprecatedDir, actualFoundInDeprecatedDir, tc) + } + } +} + +func TestContainsBlacklistedDir(t *testing.T) { + testCases := []struct { + sockDir string + deprecatedSockDir string + path string + expected bool + }{ + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins_registry/mydriver.foo/csi.sock", + expected: false, + }, + { + sockDir: 
"/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/mydriver.foo/csi.sock", + expected: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins_registry", + expected: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins", + expected: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/kubernetes.io", + expected: true, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/kubernetes.io/csi.sock", + expected: true, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/kubernetes.io/my.plugin/csi.sock", + expected: true, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/kubernetes.io/", + expected: true, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/my.driver.com", + expected: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins_registry", + expected: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins_registry/kubernetes.io", + expected: false, // New (non-deprecated dir) has no blacklist + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins_registry/my.driver.com", + expected: false, + }, + { + sockDir: 
"/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/my-kubernetes.io-plugin", + expected: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/my-kubernetes.io-plugin/csi.sock", + expected: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/kubernetes.io-plugin", + expected: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/kubernetes.io-plugin/csi.sock", + expected: false, + }, + { + sockDir: "/var/lib/kubelet/plugins_registry", + deprecatedSockDir: "/var/lib/kubelet/plugins", + path: "/var/lib/kubelet/plugins/kubernetes.io-plugin/", + expected: false, + }, + } + + for _, tc := range testCases { + // Arrange & Act + watcher := NewWatcher(tc.sockDir, tc.deprecatedSockDir) + + actual := watcher.containsBlacklistedDir(tc.path) + + // Assert + if tc.expected != actual { + t.Fatalf("expecting %v but got %v for testcase: %#v", tc.expected, actual, tc) + } + } +} diff --git a/pkg/kubelet/util/pluginwatcher/types.go b/pkg/kubelet/util/pluginwatcher/types.go index f37ed241db3..83b96b1bc86 100644 --- a/pkg/kubelet/util/pluginwatcher/types.go +++ b/pkg/kubelet/util/pluginwatcher/types.go @@ -48,11 +48,11 @@ package pluginwatcher type PluginHandler interface { // Validate returns an error if the information provided by // the potential plugin is erroneous (unsupported version, ...) - ValidatePlugin(pluginName string, endpoint string, versions []string) error + ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error // RegisterPlugin is called so that the plugin can be register by any // plugin consumer // Error encountered here can still be Notified to the plugin. 
- RegisterPlugin(pluginName, endpoint string) error + RegisterPlugin(pluginName, endpoint string, versions []string) error // DeRegister is called once the pluginwatcher observes that the socket has // been deleted. DeRegisterPlugin(pluginName string) diff --git a/pkg/kubelet/util/util_unsupported.go b/pkg/kubelet/util/util_unsupported.go index 6661678aced..68a2bdf01b7 100644 --- a/pkg/kubelet/util/util_unsupported.go +++ b/pkg/kubelet/util/util_unsupported.go @@ -45,3 +45,8 @@ func UnlockPath(fileHandles []uintptr) { func LocalEndpoint(path, file string) string { return "" } + +// GetBootTime empty implementation +func GetBootTime() (time.Time, error) { + return time.Time{}, fmt.Errorf("GetBootTime is unsupported in this build") +} diff --git a/pkg/kubelet/util/util_windows.go b/pkg/kubelet/util/util_windows.go index 7123728ff94..92accc55e14 100644 --- a/pkg/kubelet/util/util_windows.go +++ b/pkg/kubelet/util/util_windows.go @@ -23,6 +23,7 @@ import ( "net" "net/url" "strings" + "syscall" "time" "github.com/Microsoft/go-winio" @@ -112,3 +113,15 @@ func LocalEndpoint(path, file string) string { } return u.String() + "//./pipe/" + file } + +var tickCount = syscall.NewLazyDLL("kernel32.dll").NewProc("GetTickCount64") + +// GetBootTime returns the time at which the machine was started, truncated to the nearest second +func GetBootTime() (time.Time, error) { + currentTime := time.Now() + output, _, err := tickCount.Call() + if errno, ok := err.(syscall.Errno); !ok || errno != 0 { + return time.Time{}, err + } + return currentTime.Add(-time.Duration(output) * time.Millisecond).Truncate(time.Second), nil +} diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index a7b06af94ea..43a9b0f8cdf 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -154,6 +154,11 @@ type ActualStateOfWorld interface { // mounted for the 
specified pod as requiring file system resize (if the plugin for the // volume indicates it requires file system resize). MarkFSResizeRequired(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) + + // GetAttachedVolumes returns a list of volumes that is known to be attached + // to the node. This list can be used to determine volumes that are either in-use + // or have a mount/unmount operation pending. + GetAttachedVolumes() []AttachedVolume } // MountedVolume represents a volume that has successfully been mounted to a pod. @@ -710,6 +715,20 @@ func (asw *actualStateOfWorld) GetGloballyMountedVolumes() []AttachedVolume { return globallyMountedVolumes } +func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume { + asw.RLock() + defer asw.RUnlock() + allAttachedVolumes := make( + []AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */) + for _, volumeObj := range asw.attachedVolumes { + allAttachedVolumes = append( + allAttachedVolumes, + asw.newAttachedVolume(&volumeObj)) + } + + return allAttachedVolumes +} + func (asw *actualStateOfWorld) GetUnmountedVolumes() []AttachedVolume { asw.RLock() defer asw.RUnlock() diff --git a/pkg/kubelet/volumemanager/populator/BUILD b/pkg/kubelet/volumemanager/populator/BUILD index a41a69e0cf5..f038b0d32e6 100644 --- a/pkg/kubelet/volumemanager/populator/BUILD +++ b/pkg/kubelet/volumemanager/populator/BUILD @@ -47,10 +47,7 @@ filegroup( go_test( name = "go_default_test", - srcs = [ - "desired_state_of_world_populator_test.go", - "main_test.go", - ], + srcs = ["desired_state_of_world_populator_test.go"], embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", diff --git a/pkg/kubelet/volumemanager/populator/main_test.go b/pkg/kubelet/volumemanager/populator/main_test.go deleted file mode 100644 index de49504ed70..00000000000 --- a/pkg/kubelet/volumemanager/populator/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package populator - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/kubelet/volumemanager/reconciler/BUILD b/pkg/kubelet/volumemanager/reconciler/BUILD index 53508804484..ca88d62d7b9 100644 --- a/pkg/kubelet/volumemanager/reconciler/BUILD +++ b/pkg/kubelet/volumemanager/reconciler/BUILD @@ -35,10 +35,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "main_test.go", - "reconciler_test.go", - ], + srcs = ["reconciler_test.go"], embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", diff --git a/pkg/kubelet/volumemanager/reconciler/main_test.go b/pkg/kubelet/volumemanager/reconciler/main_test.go deleted file mode 100644 index e89d5ca3ea0..00000000000 --- a/pkg/kubelet/volumemanager/reconciler/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package reconciler - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go index ba3d99d64c8..561733d592c 100644 --- a/pkg/kubelet/volumemanager/volume_manager.go +++ b/pkg/kubelet/volumemanager/volume_manager.go @@ -295,9 +295,9 @@ func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName { // that volumes are marked in use as soon as the decision is made that the // volume *should* be attached to this node until it is safely unmounted. 
desiredVolumes := vm.desiredStateOfWorld.GetVolumesToMount() - mountedVolumes := vm.actualStateOfWorld.GetGloballyMountedVolumes() - volumesToReportInUse := make([]v1.UniqueVolumeName, 0, len(desiredVolumes)+len(mountedVolumes)) - desiredVolumesMap := make(map[v1.UniqueVolumeName]bool, len(desiredVolumes)+len(mountedVolumes)) + allAttachedVolumes := vm.actualStateOfWorld.GetAttachedVolumes() + volumesToReportInUse := make([]v1.UniqueVolumeName, 0, len(desiredVolumes)+len(allAttachedVolumes)) + desiredVolumesMap := make(map[v1.UniqueVolumeName]bool, len(desiredVolumes)+len(allAttachedVolumes)) for _, volume := range desiredVolumes { if volume.PluginIsAttachable { @@ -308,7 +308,7 @@ func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName { } } - for _, volume := range mountedVolumes { + for _, volume := range allAttachedVolumes { if volume.PluginIsAttachable { if _, exists := desiredVolumesMap[volume.VolumeName]; !exists { volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName) diff --git a/pkg/master/master.go b/pkg/master/master.go index b2d25a12f70..5a90d984a4e 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -444,15 +444,26 @@ func (n nodeAddressProvider) externalAddresses() ([]string, error) { if err != nil { return nil, err } + var matchErr error addrs := []string{} for ix := range nodes.Items { node := &nodes.Items[ix] addr, err := nodeutil.GetPreferredNodeAddress(node, preferredAddressTypes) if err != nil { + if _, ok := err.(*nodeutil.NoMatchError); ok { + matchErr = err + continue + } return nil, err } addrs = append(addrs, addr) } + if len(addrs) == 0 && matchErr != nil { + // We only return an error if we have items. + // Currently we return empty list/no error if Items is empty. + // We do this for backward compatibility reasons. 
+ return nil, matchErr + } return addrs, nil } diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index 5be9a014907..5ee2fa4f833 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -278,6 +278,22 @@ func TestGetNodeAddresses(t *testing.T) { assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs) } +func TestGetNodeAddressesWithOnlySomeExternalIP(t *testing.T) { + assert := assert.New(t) + + fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2", "node3"}, apiv1.NodeResources{})).Core().Nodes() + addressProvider := nodeAddressProvider{fakeNodeClient} + + // Pass case with 1 External type IP (index == 1) and nodes (indexes 0 & 2) have no External IP. + nodes, _ := fakeNodeClient.List(metav1.ListOptions{}) + nodes.Items[1].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}} + fakeNodeClient.Update(&nodes.Items[1]) + + addrs, err := addressProvider.externalAddresses() + assert.NoError(err, "addresses should not have returned an error.") + assert.Equal([]string{"127.0.0.1"}, addrs) +} + func decodeResponse(resp *http.Response, obj interface{}) error { defer resp.Body.Close() diff --git a/pkg/master/tunneler/BUILD b/pkg/master/tunneler/BUILD index fd4bf629dd9..99648d7f8aa 100644 --- a/pkg/master/tunneler/BUILD +++ b/pkg/master/tunneler/BUILD @@ -25,7 +25,6 @@ go_library( "//pkg/util/file:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/master/tunneler/ssh.go b/pkg/master/tunneler/ssh.go index 88217d170ed..4bf8307bad0 100644 --- a/pkg/master/tunneler/ssh.go +++ b/pkg/master/tunneler/ssh.go @@ -29,11 +29,9 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" 
"k8s.io/kubernetes/pkg/ssh" utilfile "k8s.io/kubernetes/pkg/util/file" - - "github.com/prometheus/client_golang/prometheus" - "k8s.io/klog" ) type InstallSSHKey func(ctx context.Context, user string, data []byte) error @@ -83,9 +81,8 @@ type SSHTunneler struct { InstallSSHKey InstallSSHKey HealthCheckURL *url.URL - tunnels *ssh.SSHTunnelList - lastSyncMetric prometheus.GaugeFunc - clock clock.Clock + tunnels *ssh.SSHTunnelList + clock clock.Clock getAddresses AddressFunc stopChan chan struct{} diff --git a/pkg/master/tunneler/ssh_test.go b/pkg/master/tunneler/ssh_test.go index f4f58b0f6e8..2fe8f283944 100644 --- a/pkg/master/tunneler/ssh_test.go +++ b/pkg/master/tunneler/ssh_test.go @@ -25,9 +25,9 @@ import ( "testing" "time" - "k8s.io/apimachinery/pkg/util/clock" - "github.com/stretchr/testify/assert" + + "k8s.io/apimachinery/pkg/util/clock" ) // TestSecondsSinceSync verifies that proper results are returned diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index 70dadf902cc..32722bd0916 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -370,7 +370,7 @@ func AddHandlers(h printers.PrintHandler) { {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, {Name: "Role", Type: "string", Priority: 1, Description: rbacv1beta1.RoleBinding{}.SwaggerDoc()["roleRef"]}, {Name: "Users", Type: "string", Priority: 1, Description: "Users in the roleBinding"}, - {Name: "Groups", Type: "string", Priority: 1, Description: "Gruops in the roleBinding"}, + {Name: "Groups", Type: "string", Priority: 1, Description: "Groups in the roleBinding"}, {Name: "ServiceAccounts", Type: "string", Priority: 1, Description: "ServiceAccounts in the roleBinding"}, } h.TableHandler(roleBindingsColumnDefinitions, printRoleBinding) @@ -380,9 +380,9 @@ func AddHandlers(h printers.PrintHandler) { {Name: "Name", Type: "string", Format: "name", Description: 
metav1.ObjectMeta{}.SwaggerDoc()["name"]}, {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, {Name: "Role", Type: "string", Priority: 1, Description: rbacv1beta1.ClusterRoleBinding{}.SwaggerDoc()["roleRef"]}, - {Name: "Users", Type: "string", Priority: 1, Description: "Users in the roleBinding"}, - {Name: "Groups", Type: "string", Priority: 1, Description: "Gruops in the roleBinding"}, - {Name: "ServiceAccounts", Type: "string", Priority: 1, Description: "ServiceAccounts in the roleBinding"}, + {Name: "Users", Type: "string", Priority: 1, Description: "Users in the clusterRoleBinding"}, + {Name: "Groups", Type: "string", Priority: 1, Description: "Groups in the clusterRoleBinding"}, + {Name: "ServiceAccounts", Type: "string", Priority: 1, Description: "ServiceAccounts in the clusterRoleBinding"}, } h.TableHandler(clusterRoleBindingsColumnDefinitions, printClusterRoleBinding) h.TableHandler(clusterRoleBindingsColumnDefinitions, printClusterRoleBindingList) diff --git a/pkg/probe/exec/exec_test.go b/pkg/probe/exec/exec_test.go index 36219830596..1d8eebb0634 100644 --- a/pkg/probe/exec/exec_test.go +++ b/pkg/probe/exec/exec_test.go @@ -50,8 +50,22 @@ func (f *FakeCmd) SetStdout(out io.Writer) {} func (f *FakeCmd) SetStderr(out io.Writer) {} +func (f *FakeCmd) SetEnv(env []string) {} + func (f *FakeCmd) Stop() {} +func (f *FakeCmd) Start() error { return nil } + +func (f *FakeCmd) Wait() error { return nil } + +func (f *FakeCmd) StdoutPipe() (io.ReadCloser, error) { + return nil, nil +} + +func (f *FakeCmd) StderrPipe() (io.ReadCloser, error) { + return nil, nil +} + type fakeExitError struct { exited bool statusCode int diff --git a/pkg/probe/http/http.go b/pkg/probe/http/http.go index e9bcc0f3e1b..6c31bed6f23 100644 --- a/pkg/probe/http/http.go +++ b/pkg/probe/http/http.go @@ -39,7 +39,13 @@ func New() Prober { // NewWithTLSConfig takes tls config as parameter. 
func NewWithTLSConfig(config *tls.Config) Prober { - transport := utilnet.SetTransportDefaults(&http.Transport{TLSClientConfig: config, DisableKeepAlives: true}) + // We do not want the probe use node's local proxy set. + transport := utilnet.SetTransportDefaults( + &http.Transport{ + TLSClientConfig: config, + DisableKeepAlives: true, + Proxy: http.ProxyURL(nil), + }) return httpProber{transport} } diff --git a/pkg/probe/http/http_test.go b/pkg/probe/http/http_test.go index 69ad77064c7..3afa8ff077e 100644 --- a/pkg/probe/http/http_test.go +++ b/pkg/probe/http/http_test.go @@ -22,6 +22,7 @@ import ( "net/http" "net/http/httptest" "net/url" + "os" "strconv" "strings" "testing" @@ -32,6 +33,62 @@ import ( const FailureCode int = -1 +func setEnv(key, value string) func() { + originalValue := os.Getenv(key) + os.Setenv(key, value) + if len(originalValue) > 0 { + return func() { + os.Setenv(key, originalValue) + } + } + return func() {} +} + +func unsetEnv(key string) func() { + originalValue := os.Getenv(key) + os.Unsetenv(key) + if len(originalValue) > 0 { + return func() { + os.Setenv(key, originalValue) + } + } + return func() {} +} + +func TestHTTPProbeProxy(t *testing.T) { + res := "welcome to http probe proxy" + localProxy := "http://127.0.0.1:9098/" + + defer setEnv("http_proxy", localProxy)() + defer setEnv("HTTP_PROXY", localProxy)() + defer unsetEnv("no_proxy")() + defer unsetEnv("NO_PROXY")() + + prober := New() + + go func() { + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, res) + }) + err := http.ListenAndServe(":9098", nil) + if err != nil { + t.Errorf("Failed to start foo server: localhost:9098") + } + }() + + // take some time to wait server boot + time.Sleep(2 * time.Second) + url, err := url.Parse("http://example.com") + if err != nil { + t.Errorf("proxy test unexpected error: %v", err) + } + _, response, _ := prober.Probe(url, http.Header{}, time.Second*3) + + if response == res { + t.Errorf("proxy test 
unexpected error: the probe is using proxy") + } +} + func TestHTTPProbeChecker(t *testing.T) { handleReq := func(s int, body string) func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 1fb994be32f..4734de066d5 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -607,7 +607,13 @@ func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceE for _, epSvcPair := range connectionMap { if svcInfo, ok := proxier.serviceMap[epSvcPair.ServicePortName]; ok && svcInfo.GetProtocol() == v1.ProtocolUDP { endpointIP := utilproxy.IPPart(epSvcPair.Endpoint) - err := conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIPString(), endpointIP, v1.ProtocolUDP) + nodePort := svcInfo.GetNodePort() + var err error + if nodePort != 0 { + err = conntrack.ClearEntriesForPortNAT(proxier.exec, endpointIP, nodePort, v1.ProtocolUDP) + } else { + err = conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIPString(), endpointIP, v1.ProtocolUDP) + } if err != nil { klog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) } diff --git a/pkg/proxy/ipvs/graceful_termination.go b/pkg/proxy/ipvs/graceful_termination.go index d9357d2c6d8..a705bb9585d 100644 --- a/pkg/proxy/ipvs/graceful_termination.go +++ b/pkg/proxy/ipvs/graceful_termination.go @@ -75,10 +75,10 @@ func (q *graceTerminateRSList) remove(rs *listItem) bool { uniqueRS := rs.String() if _, ok := q.list[uniqueRS]; ok { - return false + delete(q.list, uniqueRS) + return true } - delete(q.list, uniqueRS) - return true + return false } func (q *graceTerminateRSList) flushList(handler func(rsToDelete *listItem) (bool, error)) bool { @@ -164,7 +164,10 @@ func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, e } for _, rs := range rss { if rsToDelete.RealServer.Equal(rs) { - if 
rs.ActiveConn != 0 { + // Delete RS with no connections + // For UDP, ActiveConn is always 0 + // For TCP, InactiveConn are connections not in ESTABLISHED state + if rs.ActiveConn+rs.InactiveConn != 0 { return false, nil } klog.Infof("Deleting rs: %s", rsToDelete.String()) diff --git a/pkg/proxy/ipvs/netlink_linux.go b/pkg/proxy/ipvs/netlink_linux.go index 0c671200f03..c917c9429c0 100644 --- a/pkg/proxy/ipvs/netlink_linux.go +++ b/pkg/proxy/ipvs/netlink_linux.go @@ -30,11 +30,12 @@ import ( type netlinkHandle struct { netlink.Handle + isIPv6 bool } // NewNetLinkHandle will crate a new NetLinkHandle -func NewNetLinkHandle() NetLinkHandle { - return &netlinkHandle{netlink.Handle{}} +func NewNetLinkHandle(isIPv6 bool) NetLinkHandle { + return &netlinkHandle{netlink.Handle{}, isIPv6} } // EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true. @@ -181,7 +182,11 @@ func (h *netlinkHandle) GetLocalAddresses(dev, filterDev string) (sets.String, e if route.LinkIndex == filterLinkIndex { continue } - if route.Src != nil { + if h.isIPv6 { + if route.Dst.IP.To4() == nil && !route.Dst.IP.IsLinkLocalUnicast() { + res.Insert(route.Dst.IP.String()) + } + } else if route.Src != nil { res.Insert(route.Src.String()) } } diff --git a/pkg/proxy/ipvs/netlink_unsupported.go b/pkg/proxy/ipvs/netlink_unsupported.go index a83081f1fdb..1c709cd2b60 100644 --- a/pkg/proxy/ipvs/netlink_unsupported.go +++ b/pkg/proxy/ipvs/netlink_unsupported.go @@ -28,7 +28,7 @@ type emptyHandle struct { } // NewNetLinkHandle will create an EmptyHandle -func NewNetLinkHandle() NetLinkHandle { +func NewNetLinkHandle(ipv6 bool) NetLinkHandle { return &emptyHandle{} } diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 242a6025ca6..66883e18dc3 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -162,6 +162,8 @@ const sysctlRouteLocalnet = "net/ipv4/conf/all/route_localnet" const 
sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables" const sysctlVSConnTrack = "net/ipv4/vs/conntrack" const sysctlConnReuse = "net/ipv4/vs/conn_reuse_mode" +const sysctlExpireNoDestConn = "net/ipv4/vs/expire_nodest_conn" +const sysctlExpireQuiescentTemplate = "net/ipv4/vs/expire_quiescent_template" const sysctlForward = "net/ipv4/ip_forward" const sysctlArpIgnore = "net/ipv4/conf/all/arp_ignore" const sysctlArpAnnounce = "net/ipv4/conf/all/arp_announce" @@ -321,6 +323,20 @@ func NewProxier(ipt utiliptables.Interface, } } + // Set the expire_nodest_conn sysctl we need for + if val, _ := sysctl.GetSysctl(sysctlExpireNoDestConn); val != 1 { + if err := sysctl.SetSysctl(sysctlExpireNoDestConn, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlExpireNoDestConn, err) + } + } + + // Set the expire_quiescent_template sysctl we need for + if val, _ := sysctl.GetSysctl(sysctlExpireQuiescentTemplate); val != 1 { + if err := sysctl.SetSysctl(sysctlExpireQuiescentTemplate, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlExpireQuiescentTemplate, err) + } + } + // Set the ip_forward sysctl we need for if val, _ := sysctl.GetSysctl(sysctlForward); val != 1 { if err := sysctl.SetSysctl(sysctlForward, 1); err != nil { @@ -390,14 +406,14 @@ func NewProxier(ipt utiliptables.Interface, healthzServer: healthzServer, ipvs: ipvs, ipvsScheduler: scheduler, - ipGetter: &realIPGetter{nl: NewNetLinkHandle()}, + ipGetter: &realIPGetter{nl: NewNetLinkHandle(isIPv6)}, iptablesData: bytes.NewBuffer(nil), filterChainsData: bytes.NewBuffer(nil), natChains: bytes.NewBuffer(nil), natRules: bytes.NewBuffer(nil), filterChains: bytes.NewBuffer(nil), filterRules: bytes.NewBuffer(nil), - netlinkHandle: NewNetLinkHandle(), + netlinkHandle: NewNetLinkHandle(isIPv6), ipset: ipset, nodePortAddresses: nodePortAddresses, networkInterfacer: utilproxy.RealNetwork{}, @@ -584,7 +600,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt 
utiliptables.Interface, ipset } } // Delete dummy interface created by ipvs Proxier. - nl := NewNetLinkHandle() + nl := NewNetLinkHandle(false) err := nl.DeleteDummyDevice(DefaultDummyDevice) if err != nil { klog.Errorf("Error deleting dummy device %s created by IPVS proxier: %v", DefaultDummyDevice, err) @@ -1190,7 +1206,15 @@ func (proxier *Proxier) syncProxyRules() { } proxier.portsMap = replacementPortsMap - // Clean up legacy IPVS services + // Get legacy bind address + // currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system + currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice) + if err != nil { + klog.Errorf("Failed to get bind address, err: %v", err) + } + legacyBindAddrs := proxier.getLegacyBindAddr(activeBindAddrs, currentBindAddrs) + + // Clean up legacy IPVS services and unbind addresses appliedSvcs, err := proxier.ipvs.GetVirtualServers() if err == nil { for _, appliedSvc := range appliedSvcs { @@ -1199,15 +1223,7 @@ func (proxier *Proxier) syncProxyRules() { } else { klog.Errorf("Failed to get ipvs service, err: %v", err) } - proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices) - - // Clean up legacy bind address - // currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system - currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice) - if err != nil { - klog.Errorf("Failed to get bind address, err: %v", err) - } - proxier.cleanLegacyBindAddr(activeBindAddrs, currentBindAddrs) + proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices, legacyBindAddrs) // Update healthz timestamp if proxier.healthzServer != nil { @@ -1602,32 +1618,37 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode Port: uint16(portNum), } - klog.V(5).Infof("Using graceful delete to delete: %v", delDest) + klog.V(5).Infof("Using graceful delete to delete: %v", uniqueRS) err = 
proxier.gracefuldeleteManager.GracefulDeleteRS(appliedVirtualServer, delDest) if err != nil { - klog.Errorf("Failed to delete destination: %v, error: %v", delDest, err) + klog.Errorf("Failed to delete destination: %v, error: %v", uniqueRS, err) continue } } return nil } -func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer) { +func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer, legacyBindAddrs map[string]bool) { for cs := range currentServices { svc := currentServices[cs] if _, ok := activeServices[cs]; !ok { // This service was not processed in the latest sync loop so before deleting it, - // make sure it does not fall within an excluded CIDR range. okayToDelete := true rsList, _ := proxier.ipvs.GetRealServers(svc) + + // If we still have real servers graceful termination is not done + if len(rsList) > 0 { + okayToDelete = false + } + // Applying graceful termination to all real servers for _, rs := range rsList { uniqueRS := GetUniqueRSName(svc, rs) - // if there are in terminating real server in this service, then handle it later - if proxier.gracefuldeleteManager.InTerminationList(uniqueRS) { - okayToDelete = false - break + klog.V(5).Infof("Using graceful delete to delete: %v", uniqueRS) + if err := proxier.gracefuldeleteManager.GracefulDeleteRS(svc, rs); err != nil { + klog.Errorf("Failed to delete destination: %v, error: %v", uniqueRS, err) } } + // make sure it does not fall within an excluded CIDR range. for _, excludedCIDR := range proxier.excludeCIDRs { // Any validation of this CIDR already should have occurred. 
_, n, _ := net.ParseCIDR(excludedCIDR) @@ -1637,26 +1658,38 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre } } if okayToDelete { + klog.V(4).Infof("Delete service %s", svc.String()) if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil { - klog.Errorf("Failed to delete service, error: %v", err) + klog.Errorf("Failed to delete service %s, error: %v", svc.String(), err) + } + addr := svc.Address.String() + if _, ok := legacyBindAddrs[addr]; ok { + klog.V(4).Infof("Unbinding address %s", addr) + if err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice); err != nil { + klog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err) + } else { + // In case we delete a multi-port service, avoid trying to unbind multiple times + delete(legacyBindAddrs, addr) + } } } } } } -func (proxier *Proxier) cleanLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) { +func (proxier *Proxier) getLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) map[string]bool { + legacyAddrs := make(map[string]bool) + isIpv6 := utilnet.IsIPv6(proxier.nodeIP) for _, addr := range currentBindAddrs { + addrIsIpv6 := utilnet.IsIPv6(net.ParseIP(addr)) + if addrIsIpv6 && !isIpv6 || !addrIsIpv6 && isIpv6 { + continue + } if _, ok := activeBindAddrs[addr]; !ok { - // This address was not processed in the latest sync loop - klog.V(4).Infof("Unbind addr %s", addr) - err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice) - // Ignore no such address error when try to unbind address - if err != nil { - klog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err) - } + legacyAddrs[addr] = true } } + return legacyAddrs } // Join all words with spaces, terminate with newline and write to buff. 
diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index 921ff01f0d1..5c5f7492ff1 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -23,7 +23,6 @@ import ( "reflect" "strings" "testing" - "time" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -126,7 +125,7 @@ func (fakeSysctl *FakeSysctl) SetSysctl(sysctl string, newVal int) error { return nil } -func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []net.IP) *Proxier { +func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []net.IP, excludeCIDRs []string) *Proxier { fcmd := fakeexec.FakeCmd{ CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{ func() ([]byte, error) { return []byte("dummy device have been created"), nil }, @@ -151,7 +150,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, nil, nil), endpointsMap: make(proxy.EndpointsMap), endpointsChanges: proxy.NewEndpointChangeTracker(testHostname, nil, nil, nil), - excludeCIDRs: make([]string, 0), + excludeCIDRs: excludeCIDRs, iptables: ipt, ipvs: ipvs, ipset: ipset, @@ -228,7 +227,7 @@ func TestCleanupLeftovers(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) svcIP := "10.20.30.41" svcPort := 80 svcNodePort := 3001 @@ -418,7 +417,7 @@ func TestNodePortUDP(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}) + fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil) svcIP := "10.20.30.41" svcPort := 80 @@ -495,7 +494,7 @@ func TestNodePort(t *testing.T) { nodeIPv4 := 
net.ParseIP("100.101.102.103") nodeIPv6 := net.ParseIP("2001:db8::1:1") nodeIPs := sets.NewString(nodeIPv4.String(), nodeIPv6.String()) - fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIPv4, nodeIPv6}) + fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIPv4, nodeIPv6}, nil) svcIP := "10.20.30.41" svcPort := 80 svcNodePort := 3001 @@ -573,7 +572,7 @@ func TestNodePortNoEndpoint(t *testing.T) { ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) nodeIP := net.ParseIP("100.101.102.103") - fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}) + fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil) svcIP := "10.20.30.41" svcPort := 80 svcNodePort := 3001 @@ -628,7 +627,7 @@ func TestClusterIPNoEndpoint(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) svcIP := "10.20.30.41" svcPort := 80 svcPortName := proxy.ServicePortName{ @@ -672,7 +671,7 @@ func TestClusterIP(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) svcIPv4 := "10.20.30.41" svcPortV4 := 80 @@ -779,7 +778,7 @@ func TestExternalIPsNoEndpoint(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) svcIP := "10.20.30.41" svcPort := 80 svcExternalIPs := "50.60.70.81" @@ -834,7 +833,7 @@ func TestExternalIPs(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) svcIP := "10.20.30.41" svcPort := 80 svcExternalIPs := 
sets.NewString("50.60.70.81", "2012::51", "127.0.0.1") @@ -1338,7 +1337,7 @@ func TestBuildServiceMapAddRemove(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) services := []*v1.Service{ makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) { @@ -1448,7 +1447,7 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) makeServiceMap(fp, makeTestService("somewhere-else", "headless", func(svc *v1.Service) { @@ -1487,7 +1486,7 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) makeServiceMap(fp, makeTestService("somewhere-else", "external-name", func(svc *v1.Service) { @@ -1515,7 +1514,7 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeClusterIP @@ -1599,7 +1598,7 @@ func TestSessionAffinity(t *testing.T) { ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) nodeIP := net.ParseIP("100.101.102.103") - fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}) + fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil) svcIP := "10.20.30.41" svcPort := 80 svcNodePort := 3001 @@ -2462,7 +2461,7 @@ func Test_updateEndpointsMap(t *testing.T) { ipt := 
iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) fp.hostname = nodeName // First check that after adding all previous versions of endpoints, @@ -2706,7 +2705,7 @@ func Test_syncService(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - proxier := NewFakeProxier(ipt, ipvs, ipset, nil) + proxier := NewFakeProxier(ipt, ipvs, ipset, nil, nil) proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice) if testCases[i].oldVirtualServer != nil { @@ -2736,7 +2735,7 @@ func buildFakeProxier() (*iptablestest.FakeIPTables, *Proxier) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - return ipt, NewFakeProxier(ipt, ipvs, ipset, nil) + return ipt, NewFakeProxier(ipt, ipvs, ipset, nil, nil) } func hasJump(rules []iptablestest.Rule, destChain, ipSet string) bool { @@ -2806,33 +2805,10 @@ func checkIPVS(t *testing.T, fp *Proxier, vs *netlinktest.ExpectedVirtualServer) } func TestCleanLegacyService(t *testing.T) { - execer := exec.New() ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - excludeCIDRs := []string{"3.3.3.0/24", "4.4.4.0/24"} - proxier, err := NewProxier( - ipt, - ipvs, - ipset, - NewFakeSysctl(), - execer, - 250*time.Millisecond, - 100*time.Millisecond, - excludeCIDRs, - false, - 0, - "10.0.0.0/24", - testHostname, - net.ParseIP("127.0.0.1"), - nil, - nil, - DefaultScheduler, - make([]string, 0), - ) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } + fp := NewFakeProxier(ipt, ipvs, ipset, nil, []string{"3.3.3.0/24", "4.4.4.0/24"}) // All ipvs services that were processed in the latest sync loop. 
activeServices := map[string]bool{"ipvs0": true, "ipvs1": true} @@ -2888,15 +2864,23 @@ func TestCleanLegacyService(t *testing.T) { }, } for v := range currentServices { - proxier.ipvs.AddVirtualServer(currentServices[v]) + fp.ipvs.AddVirtualServer(currentServices[v]) } - proxier.cleanLegacyService(activeServices, currentServices) + + fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice) + activeBindAddrs := map[string]bool{"1.1.1.1": true, "2.2.2.2": true, "3.3.3.3": true, "4.4.4.4": true} + // This is ipv4-only so ipv6 addresses should be ignored + currentBindAddrs := []string{"1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4", "5.5.5.5", "6.6.6.6", "fd80::1:2:3", "fd80::1:2:4"} + for i := range currentBindAddrs { + fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice) + } + + fp.cleanLegacyService(activeServices, currentServices, map[string]bool{"5.5.5.5": true, "6.6.6.6": true}) // ipvs4 and ipvs5 should have been cleaned. - remainingVirtualServers, _ := proxier.ipvs.GetVirtualServers() + remainingVirtualServers, _ := fp.ipvs.GetVirtualServers() if len(remainingVirtualServers) != 4 { t.Errorf("Expected number of remaining IPVS services after cleanup to be %v. Got %v", 4, len(remainingVirtualServers)) } - for _, vs := range remainingVirtualServers { // Checking that ipvs4 and ipvs5 were removed. if vs.Port == 57 { @@ -2906,47 +2890,138 @@ func TestCleanLegacyService(t *testing.T) { t.Errorf("Expected ipvs5 to be removed after cleanup. It still remains") } } -} - -func TestCleanLegacyBindAddr(t *testing.T) { - ipt := iptablestest.NewFake() - ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) - - // All ipvs service addresses that were bound to ipvs0 in the latest sync loop. 
- activeBindAddrs := map[string]bool{"1.2.3.4": true, "1002:ab8::2:1": true} - // All service addresses that were bound to ipvs0 in system - currentBindAddrs := []string{"1.2.3.4", "1.2.3.5", "1.2.3.6", "1002:ab8::2:1", "1002:ab8::2:2"} - - fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice) - - for i := range currentBindAddrs { - fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice) - } - fp.cleanLegacyBindAddr(activeBindAddrs, currentBindAddrs) + // Addresses 5.5.5.5 and 6.6.6.6 should not be bound any more, but the ipv6 addresses should remain remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice) - // should only remain "1.2.3.4" and "1002:ab8::2:1" - if len(remainingAddrs) != 2 { - t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 2, len(remainingAddrs)) + if len(remainingAddrs) != 6 { + t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 6, len(remainingAddrs)) } - - // check that address "1.2.3.4" and "1002:ab8::2:1" remain + // check that address "1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4" are bound, ignore ipv6 addresses remainingAddrsMap := make(map[string]bool) - for i := range remainingAddrs { - remainingAddrsMap[remainingAddrs[i]] = true + for _, a := range remainingAddrs { + if net.ParseIP(a).To4() == nil { + continue + } + remainingAddrsMap[a] = true } if !reflect.DeepEqual(activeBindAddrs, remainingAddrsMap) { t.Errorf("Expected remainingAddrsMap %v, got %v", activeBindAddrs, remainingAddrsMap) } + +} + +func TestCleanLegacyService6(t *testing.T) { + ipt := iptablestest.NewFake() + ipvs := ipvstest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, []string{"3000::/64", "4000::/64"}) + fp.nodeIP = net.ParseIP("::1") + + // All ipvs services that were processed in the latest sync loop. + activeServices := map[string]bool{"ipvs0": true, "ipvs1": true} + // All ipvs services in the system. 
+ currentServices := map[string]*utilipvs.VirtualServer{ + // Created by kube-proxy. + "ipvs0": { + Address: net.ParseIP("1000::1"), + Protocol: string(v1.ProtocolUDP), + Port: 53, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + // Created by kube-proxy. + "ipvs1": { + Address: net.ParseIP("1000::2"), + Protocol: string(v1.ProtocolUDP), + Port: 54, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + // Created by an external party. + "ipvs2": { + Address: net.ParseIP("3000::1"), + Protocol: string(v1.ProtocolUDP), + Port: 55, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + // Created by an external party. + "ipvs3": { + Address: net.ParseIP("4000::1"), + Protocol: string(v1.ProtocolUDP), + Port: 56, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + // Created by an external party. + "ipvs4": { + Address: net.ParseIP("5000::1"), + Protocol: string(v1.ProtocolUDP), + Port: 57, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + // Created by kube-proxy, but now stale. + "ipvs5": { + Address: net.ParseIP("1000::6"), + Protocol: string(v1.ProtocolUDP), + Port: 58, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + } + for v := range currentServices { + fp.ipvs.AddVirtualServer(currentServices[v]) + } + + fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice) + activeBindAddrs := map[string]bool{"1000::1": true, "1000::2": true, "3000::1": true, "4000::1": true} + // This is ipv6-only so ipv4 addresses should be ignored + currentBindAddrs := []string{"1000::1", "1000::2", "3000::1", "4000::1", "5000::1", "1000::6", "1.1.1.1", "2.2.2.2"} + for i := range currentBindAddrs { + fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice) + } + + fp.cleanLegacyService(activeServices, currentServices, map[string]bool{"5000::1": true, "1000::6": true}) + // ipvs4 and ipvs5 should have been cleaned. 
+ remainingVirtualServers, _ := fp.ipvs.GetVirtualServers() + if len(remainingVirtualServers) != 4 { + t.Errorf("Expected number of remaining IPVS services after cleanup to be %v. Got %v", 4, len(remainingVirtualServers)) + } + for _, vs := range remainingVirtualServers { + // Checking that ipvs4 and ipvs5 were removed. + if vs.Port == 57 { + t.Errorf("Expected ipvs4 to be removed after cleanup. It still remains") + } + if vs.Port == 58 { + t.Errorf("Expected ipvs5 to be removed after cleanup. It still remains") + } + } + + // Addresses 5000::1 and 1000::6 should not be bound any more, but the ipv4 addresses should remain + remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice) + if len(remainingAddrs) != 6 { + t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 6, len(remainingAddrs)) + } + // check that address "1000::1", "1000::2", "3000::1", "4000::1" are still bound, ignore ipv4 addresses + remainingAddrsMap := make(map[string]bool) + for _, a := range remainingAddrs { + if net.ParseIP(a).To4() != nil { + continue + } + remainingAddrsMap[a] = true + } + if !reflect.DeepEqual(activeBindAddrs, remainingAddrsMap) { + t.Errorf("Expected remainingAddrsMap %v, got %v", activeBindAddrs, remainingAddrsMap) + } + } func TestMultiPortServiceBindAddr(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil) service1 := makeTestService("ns1", "svc1", func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeClusterIP diff --git a/pkg/proxy/service.go b/pkg/proxy/service.go index 8386e62c0e2..307c3d1b3bc 100644 --- a/pkg/proxy/service.go +++ b/pkg/proxy/service.go @@ -74,6 +74,11 @@ func (info *BaseServiceInfo) GetHealthCheckNodePort() int { return info.HealthCheckNodePort } +// GetNodePort is part of the ServicePort interface. 
+func (info *BaseServiceInfo) GetNodePort() int { + return info.NodePort +} + func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, service *v1.Service) *BaseServiceInfo { onlyNodeLocalEndpoints := false if apiservice.RequestsOnlyLocalTraffic(service) { diff --git a/pkg/proxy/types.go b/pkg/proxy/types.go index f38937068c8..f77f9ed0f36 100644 --- a/pkg/proxy/types.go +++ b/pkg/proxy/types.go @@ -54,6 +54,8 @@ type ServicePort interface { GetProtocol() v1.Protocol // GetHealthCheckNodePort returns service health check node port if present. If return 0, it means not present. GetHealthCheckNodePort() int + // GetNodePort returns a service Node port if present. If return 0, it means not present. + GetNodePort() int } // Endpoint in an interface which abstracts information about an endpoint. diff --git a/pkg/proxy/util/utils.go b/pkg/proxy/util/utils.go index f1db309a941..2c1408da43b 100644 --- a/pkg/proxy/util/utils.go +++ b/pkg/proxy/util/utils.go @@ -17,6 +17,8 @@ limitations under the License. 
package util import ( + "context" + "errors" "fmt" "net" @@ -35,6 +37,11 @@ const ( IPv6ZeroCIDR = "::/0" ) +var ( + ErrAddressNotAllowed = errors.New("address not allowed") + ErrNoAddresses = errors.New("No addresses for hostname") +) + func IsZeroCIDR(cidr string) bool { if cidr == IPv4ZeroCIDR || cidr == IPv6ZeroCIDR { return true @@ -42,6 +49,46 @@ func IsZeroCIDR(cidr string) bool { return false } +// IsProxyableIP checks if a given IP address is permitted to be proxied +func IsProxyableIP(ip string) error { + netIP := net.ParseIP(ip) + if netIP == nil { + return ErrAddressNotAllowed + } + return isProxyableIP(netIP) +} + +func isProxyableIP(ip net.IP) error { + if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() { + return ErrAddressNotAllowed + } + return nil +} + +// Resolver is an interface for net.Resolver +type Resolver interface { + LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) +} + +// IsProxyableHostname checks if the IP addresses for a given hostname are permitted to be proxied +func IsProxyableHostname(ctx context.Context, resolv Resolver, hostname string) error { + resp, err := resolv.LookupIPAddr(ctx, hostname) + if err != nil { + return err + } + + if len(resp) == 0 { + return ErrNoAddresses + } + + for _, host := range resp { + if err := isProxyableIP(host.IP); err != nil { + return err + } + } + return nil +} + func IsLocalIP(ip string) (bool, error) { addrs, err := net.InterfaceAddrs() if err != nil { diff --git a/pkg/proxy/util/utils_test.go b/pkg/proxy/util/utils_test.go index 0f4c19f2e8b..891a3520f1c 100644 --- a/pkg/proxy/util/utils_test.go +++ b/pkg/proxy/util/utils_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package util import ( + "context" "net" "testing" @@ -27,6 +28,74 @@ import ( fake "k8s.io/kubernetes/pkg/proxy/util/testing" ) +func TestIsProxyableIP(t *testing.T) { + testCases := []struct { + ip string + want error + }{ + {"127.0.0.1", ErrAddressNotAllowed}, + {"127.0.0.2", ErrAddressNotAllowed}, + {"169.254.169.254", ErrAddressNotAllowed}, + {"169.254.1.1", ErrAddressNotAllowed}, + {"224.0.0.0", ErrAddressNotAllowed}, + {"10.0.0.1", nil}, + {"192.168.0.1", nil}, + {"172.16.0.1", nil}, + {"8.8.8.8", nil}, + {"::1", ErrAddressNotAllowed}, + {"fe80::", ErrAddressNotAllowed}, + {"ff02::", ErrAddressNotAllowed}, + {"ff01::", ErrAddressNotAllowed}, + {"2600::", nil}, + {"1", ErrAddressNotAllowed}, + {"", ErrAddressNotAllowed}, + } + + for i := range testCases { + got := IsProxyableIP(testCases[i].ip) + if testCases[i].want != got { + t.Errorf("case %d: expected %v, got %v", i, testCases[i].want, got) + } + } +} + +type dummyResolver struct { + ips []string + err error +} + +func (r *dummyResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) { + if r.err != nil { + return nil, r.err + } + resp := []net.IPAddr{} + for _, ipString := range r.ips { + resp = append(resp, net.IPAddr{IP: net.ParseIP(ipString)}) + } + return resp, nil +} + +func TestIsProxyableHostname(t *testing.T) { + testCases := []struct { + hostname string + ips []string + want error + }{ + {"k8s.io", []string{}, ErrNoAddresses}, + {"k8s.io", []string{"8.8.8.8"}, nil}, + {"k8s.io", []string{"169.254.169.254"}, ErrAddressNotAllowed}, + {"k8s.io", []string{"127.0.0.1", "8.8.8.8"}, ErrAddressNotAllowed}, + } + + for i := range testCases { + resolv := dummyResolver{ips: testCases[i].ips} + got := IsProxyableHostname(context.Background(), &resolv, testCases[i].hostname) + if testCases[i].want != got { + t.Errorf("case %d: expected %v, got %v", i, testCases[i].want, got) + } + } +} + func TestShouldSkipService(t *testing.T) { testCases := []struct { service *v1.Service diff --git 
a/pkg/registry/apps/daemonset/strategy.go b/pkg/registry/apps/daemonset/strategy.go index f9b89bb552e..d3e956bcd3d 100644 --- a/pkg/registry/apps/daemonset/strategy.go +++ b/pkg/registry/apps/daemonset/strategy.go @@ -44,19 +44,20 @@ type daemonSetStrategy struct { // Strategy is the default logic that applies when creating and updating DaemonSet objects. var Strategy = daemonSetStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -// DefaultGarbageCollectionPolicy returns OrphanDependents by default. For apps/v1, returns DeleteDependents. +// DefaultGarbageCollectionPolicy returns OrphanDependents for extensions/v1beta1 and apps/v1beta2 for backwards compatibility, +// and DeleteDependents for all other versions. func (daemonSetStrategy) DefaultGarbageCollectionPolicy(ctx context.Context) rest.GarbageCollectionPolicy { + var groupVersion schema.GroupVersion if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { - groupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} - switch groupVersion { - case extensionsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: - // for back compatibility - return rest.OrphanDependents - default: - return rest.DeleteDependents - } + groupVersion = schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + } + switch groupVersion { + case extensionsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents } - return rest.OrphanDependents } // NamespaceScoped returns true because all DaemonSets need to be within a namespace. 
diff --git a/pkg/registry/apps/daemonset/strategy_test.go b/pkg/registry/apps/daemonset/strategy_test.go index eb932cbf6ff..f48b3fd7e26 100644 --- a/pkg/registry/apps/daemonset/strategy_test.go +++ b/pkg/registry/apps/daemonset/strategy_test.go @@ -72,7 +72,7 @@ func TestDaemonsetDefaultGarbageCollectionPolicy(t *testing.T) { false, }, { - expectedGCPolicy: rest.OrphanDependents, + expectedGCPolicy: rest.DeleteDependents, isNilRequestInfo: true, }, } diff --git a/pkg/registry/apps/deployment/strategy.go b/pkg/registry/apps/deployment/strategy.go index e9d593d838b..1b96036da55 100644 --- a/pkg/registry/apps/deployment/strategy.go +++ b/pkg/registry/apps/deployment/strategy.go @@ -46,19 +46,20 @@ type deploymentStrategy struct { // objects via the REST API. var Strategy = deploymentStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -// DefaultGarbageCollectionPolicy returns OrphanDependents by default. For apps/v1, returns DeleteDependents. +// DefaultGarbageCollectionPolicy returns OrphanDependents for extensions/v1beta1, apps/v1beta1, and apps/v1beta2 for backwards compatibility, +// and DeleteDependents for all other versions. 
func (deploymentStrategy) DefaultGarbageCollectionPolicy(ctx context.Context) rest.GarbageCollectionPolicy { + var groupVersion schema.GroupVersion if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { - groupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} - switch groupVersion { - case extensionsv1beta1.SchemeGroupVersion, appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: - // for back compatibility - return rest.OrphanDependents - default: - return rest.DeleteDependents - } + groupVersion = schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + } + switch groupVersion { + case extensionsv1beta1.SchemeGroupVersion, appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents } - return rest.OrphanDependents } // NamespaceScoped is true for deployment. diff --git a/pkg/registry/apps/deployment/strategy_test.go b/pkg/registry/apps/deployment/strategy_test.go index a7a68766b41..3376993605a 100644 --- a/pkg/registry/apps/deployment/strategy_test.go +++ b/pkg/registry/apps/deployment/strategy_test.go @@ -231,7 +231,7 @@ func TestDeploymentDefaultGarbageCollectionPolicy(t *testing.T) { false, }, { - expectedGCPolicy: rest.OrphanDependents, + expectedGCPolicy: rest.DeleteDependents, isNilRequestInfo: true, }, } diff --git a/pkg/registry/apps/replicaset/strategy.go b/pkg/registry/apps/replicaset/strategy.go index 592c6893dea..9e3fe3ecf5b 100644 --- a/pkg/registry/apps/replicaset/strategy.go +++ b/pkg/registry/apps/replicaset/strategy.go @@ -52,19 +52,20 @@ type rsStrategy struct { // Strategy is the default logic that applies when creating and updating ReplicaSet objects. var Strategy = rsStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -// DefaultGarbageCollectionPolicy returns OrphanDependents by default. For apps/v1, returns DeleteDependents. 
+// DefaultGarbageCollectionPolicy returns OrphanDependents for extensions/v1beta1 and apps/v1beta2 for backwards compatibility, +// and DeleteDependents for all other versions. func (rsStrategy) DefaultGarbageCollectionPolicy(ctx context.Context) rest.GarbageCollectionPolicy { + var groupVersion schema.GroupVersion if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { - groupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} - switch groupVersion { - case extensionsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: - // for back compatibility - return rest.OrphanDependents - default: - return rest.DeleteDependents - } + groupVersion = schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + } + switch groupVersion { + case extensionsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents } - return rest.OrphanDependents } // NamespaceScoped returns true because all ReplicaSets need to be within a namespace. diff --git a/pkg/registry/apps/replicaset/strategy_test.go b/pkg/registry/apps/replicaset/strategy_test.go index 6023fe0aa1c..2c2994af768 100644 --- a/pkg/registry/apps/replicaset/strategy_test.go +++ b/pkg/registry/apps/replicaset/strategy_test.go @@ -266,7 +266,7 @@ func TestReplicasetDefaultGarbageCollectionPolicy(t *testing.T) { false, }, { - expectedGCPolicy: rest.OrphanDependents, + expectedGCPolicy: rest.DeleteDependents, isNilRequestInfo: true, }, } diff --git a/pkg/registry/apps/statefulset/strategy.go b/pkg/registry/apps/statefulset/strategy.go index cbf584933e3..af500f8af46 100644 --- a/pkg/registry/apps/statefulset/strategy.go +++ b/pkg/registry/apps/statefulset/strategy.go @@ -43,19 +43,20 @@ type statefulSetStrategy struct { // Strategy is the default logic that applies when creating and updating Replication StatefulSet objects. 
var Strategy = statefulSetStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -// DefaultGarbageCollectionPolicy returns OrphanDependents by default. For apps/v1, returns DeleteDependents. +// DefaultGarbageCollectionPolicy returns OrphanDependents for apps/v1beta1 and apps/v1beta2 for backwards compatibility, +// and DeleteDependents for all other versions. func (statefulSetStrategy) DefaultGarbageCollectionPolicy(ctx context.Context) rest.GarbageCollectionPolicy { + var groupVersion schema.GroupVersion if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { - groupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} - switch groupVersion { - case appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: - // for back compatibility - return rest.OrphanDependents - default: - return rest.DeleteDependents - } + groupVersion = schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + } + switch groupVersion { + case appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents } - return rest.OrphanDependents } // NamespaceScoped returns true because all StatefulSet' need to be within a namespace. 
diff --git a/pkg/registry/apps/statefulset/strategy_test.go b/pkg/registry/apps/statefulset/strategy_test.go index aba7b35bc61..4fdab0e9e62 100644 --- a/pkg/registry/apps/statefulset/strategy_test.go +++ b/pkg/registry/apps/statefulset/strategy_test.go @@ -130,7 +130,7 @@ func TestStatefulsetDefaultGarbageCollectionPolicy(t *testing.T) { false, }, { - expectedGCPolicy: rest.OrphanDependents, + expectedGCPolicy: rest.DeleteDependents, isNilRequestInfo: true, }, } diff --git a/pkg/registry/batch/cronjob/BUILD b/pkg/registry/batch/cronjob/BUILD index e9661e46b18..40cc472645c 100644 --- a/pkg/registry/batch/cronjob/BUILD +++ b/pkg/registry/batch/cronjob/BUILD @@ -18,8 +18,12 @@ go_library( "//pkg/api/pod:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/batch/validation:go_default_library", + "//staging/src/k8s.io/api/batch/v1beta1:go_default_library", + "//staging/src/k8s.io/api/batch/v2alpha1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library", ], diff --git a/pkg/registry/batch/cronjob/strategy.go b/pkg/registry/batch/cronjob/strategy.go index 1e756d6b15f..a59c2b96e6b 100644 --- a/pkg/registry/batch/cronjob/strategy.go +++ b/pkg/registry/batch/cronjob/strategy.go @@ -19,8 +19,12 @@ package cronjob import ( "context" + batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" 
"k8s.io/apiserver/pkg/storage/names" "k8s.io/kubernetes/pkg/api/legacyscheme" @@ -38,10 +42,20 @@ type cronJobStrategy struct { // Strategy is the default logic that applies when creating and updating CronJob objects. var Strategy = cronJobStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -// DefaultGarbageCollectionPolicy returns Orphan because that was the default -// behavior before the server-side garbage collection was implemented. +// DefaultGarbageCollectionPolicy returns OrphanDependents for batch/v1beta1 and batch/v2alpha1 for backwards compatibility, +// and DeleteDependents for all other versions. func (cronJobStrategy) DefaultGarbageCollectionPolicy(ctx context.Context) rest.GarbageCollectionPolicy { - return rest.OrphanDependents + var groupVersion schema.GroupVersion + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + groupVersion = schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + } + switch groupVersion { + case batchv1beta1.SchemeGroupVersion, batchv2alpha1.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents + } } // NamespaceScoped returns true because all scheduled jobs need to be within a namespace. diff --git a/pkg/registry/batch/cronjob/strategy_test.go b/pkg/registry/batch/cronjob/strategy_test.go index 0b46ac9f861..be568d1bdcc 100644 --- a/pkg/registry/batch/cronjob/strategy_test.go +++ b/pkg/registry/batch/cronjob/strategy_test.go @@ -90,7 +90,22 @@ func TestCronJobStrategy(t *testing.T) { // Make sure we correctly implement the interface. // Otherwise a typo could silently change the default. 
var gcds rest.GarbageCollectionDeleteStrategy = Strategy - if got, want := gcds.DefaultGarbageCollectionPolicy(genericapirequest.NewContext()), rest.OrphanDependents; got != want { + if got, want := gcds.DefaultGarbageCollectionPolicy(genericapirequest.NewContext()), rest.DeleteDependents; got != want { + t.Errorf("DefaultGarbageCollectionPolicy() = %#v, want %#v", got, want) + } + + var ( + v1beta1Ctx = genericapirequest.WithRequestInfo(genericapirequest.NewContext(), &genericapirequest.RequestInfo{APIGroup: "batch", APIVersion: "v1beta1", Resource: "cronjobs"}) + v2alpha1Ctx = genericapirequest.WithRequestInfo(genericapirequest.NewContext(), &genericapirequest.RequestInfo{APIGroup: "batch", APIVersion: "v2alpha1", Resource: "cronjobs"}) + otherVersionCtx = genericapirequest.WithRequestInfo(genericapirequest.NewContext(), &genericapirequest.RequestInfo{APIGroup: "batch", APIVersion: "v100", Resource: "cronjobs"}) + ) + if got, want := gcds.DefaultGarbageCollectionPolicy(v1beta1Ctx), rest.OrphanDependents; got != want { + t.Errorf("DefaultGarbageCollectionPolicy() = %#v, want %#v", got, want) + } + if got, want := gcds.DefaultGarbageCollectionPolicy(v2alpha1Ctx), rest.OrphanDependents; got != want { + t.Errorf("DefaultGarbageCollectionPolicy() = %#v, want %#v", got, want) + } + if got, want := gcds.DefaultGarbageCollectionPolicy(otherVersionCtx), rest.DeleteDependents; got != want { t.Errorf("DefaultGarbageCollectionPolicy() = %#v, want %#v", got, want) } } diff --git a/pkg/registry/batch/job/BUILD b/pkg/registry/batch/job/BUILD index 30474db1f3c..98cfab64cc1 100644 --- a/pkg/registry/batch/job/BUILD +++ b/pkg/registry/batch/job/BUILD @@ -19,11 +19,14 @@ go_library( "//pkg/apis/batch:go_default_library", "//pkg/apis/batch/validation:go_default_library", "//pkg/features:go_default_library", + "//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage:go_default_library", diff --git a/pkg/registry/batch/job/strategy.go b/pkg/registry/batch/job/strategy.go index 4e46ddc4b37..e1ec1777659 100644 --- a/pkg/registry/batch/job/strategy.go +++ b/pkg/registry/batch/job/strategy.go @@ -21,11 +21,14 @@ import ( "fmt" "strconv" + batchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage" @@ -47,10 +50,20 @@ type jobStrategy struct { // Strategy is the default logic that applies when creating and updating Replication Controller objects. var Strategy = jobStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -// DefaultGarbageCollectionPolicy returns Orphan because that was the default -// behavior before the server-side garbage collection was implemented. +// DefaultGarbageCollectionPolicy returns OrphanDependents for batch/v1 for backwards compatibility, +// and DeleteDependents for all other versions. 
func (jobStrategy) DefaultGarbageCollectionPolicy(ctx context.Context) rest.GarbageCollectionPolicy { - return rest.OrphanDependents + var groupVersion schema.GroupVersion + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + groupVersion = schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + } + switch groupVersion { + case batchv1.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents + } } // NamespaceScoped returns true because all jobs need to be within a namespace. diff --git a/pkg/registry/batch/job/strategy_test.go b/pkg/registry/batch/job/strategy_test.go index e3b51678c15..a60af6ab201 100644 --- a/pkg/registry/batch/job/strategy_test.go +++ b/pkg/registry/batch/job/strategy_test.go @@ -124,7 +124,18 @@ func TestJobStrategy(t *testing.T) { // Make sure we correctly implement the interface. // Otherwise a typo could silently change the default. var gcds rest.GarbageCollectionDeleteStrategy = Strategy - if got, want := gcds.DefaultGarbageCollectionPolicy(genericapirequest.NewContext()), rest.OrphanDependents; got != want { + if got, want := gcds.DefaultGarbageCollectionPolicy(genericapirequest.NewContext()), rest.DeleteDependents; got != want { + t.Errorf("DefaultGarbageCollectionPolicy() = %#v, want %#v", got, want) + } + + var ( + v1Ctx = genericapirequest.WithRequestInfo(genericapirequest.NewContext(), &genericapirequest.RequestInfo{APIGroup: "batch", APIVersion: "v1", Resource: "jobs"}) + otherVersionCtx = genericapirequest.WithRequestInfo(genericapirequest.NewContext(), &genericapirequest.RequestInfo{APIGroup: "batch", APIVersion: "v100", Resource: "jobs"}) + ) + if got, want := gcds.DefaultGarbageCollectionPolicy(v1Ctx), rest.OrphanDependents; got != want { + t.Errorf("DefaultGarbageCollectionPolicy() = %#v, want %#v", got, want) + } + if got, want := gcds.DefaultGarbageCollectionPolicy(otherVersionCtx), rest.DeleteDependents; got != 
want { t.Errorf("DefaultGarbageCollectionPolicy() = %#v, want %#v", got, want) } } diff --git a/pkg/registry/core/node/BUILD b/pkg/registry/core/node/BUILD index e026fe00b6d..0819655b314 100644 --- a/pkg/registry/core/node/BUILD +++ b/pkg/registry/core/node/BUILD @@ -19,6 +19,7 @@ go_library( "//pkg/apis/core/validation:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/client:go_default_library", + "//pkg/proxy/util:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", diff --git a/pkg/registry/core/node/strategy.go b/pkg/registry/core/node/strategy.go index 72828b48998..1287f93dee9 100644 --- a/pkg/registry/core/node/strategy.go +++ b/pkg/registry/core/node/strategy.go @@ -40,6 +40,7 @@ import ( "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/client" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" ) // nodeStrategy implements behavior for nodes @@ -217,6 +218,10 @@ func ResourceLocation(getter ResourceGetter, connection client.ConnectionInfoGet nil } + if err := proxyutil.IsProxyableHostname(ctx, &net.Resolver{}, info.Hostname); err != nil { + return nil, nil, errors.NewBadRequest(err.Error()) + } + // Otherwise, return the requested scheme and port, and the proxy transport return &url.URL{Scheme: schemeReq, Host: net.JoinHostPort(info.Hostname, portReq)}, proxyTransport, nil } diff --git a/pkg/registry/core/pod/BUILD b/pkg/registry/core/pod/BUILD index e7386dfa1ac..ddf8fcb5d15 100644 --- a/pkg/registry/core/pod/BUILD +++ b/pkg/registry/core/pod/BUILD @@ -20,6 +20,7 @@ go_library( "//pkg/apis/core/helper/qos:go_default_library", "//pkg/apis/core/validation:go_default_library", "//pkg/kubelet/client:go_default_library", + "//pkg/proxy/util:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/registry/core/pod/strategy.go b/pkg/registry/core/pod/strategy.go index 69e77c5a612..4c2ab6becb2 100644 --- a/pkg/registry/core/pod/strategy.go +++ b/pkg/registry/core/pod/strategy.go @@ -47,6 +47,7 @@ import ( "k8s.io/kubernetes/pkg/apis/core/helper/qos" "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/kubelet/client" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" ) // podStrategy implements behavior for Pods @@ -290,6 +291,10 @@ func ResourceLocation(getter ResourceGetter, rt http.RoundTripper, ctx context.C } } + if err := proxyutil.IsProxyableIP(pod.Status.PodIP); err != nil { + return nil, nil, errors.NewBadRequest(err.Error()) + } + loc := &url.URL{ Scheme: scheme, } diff --git a/pkg/registry/core/replicationcontroller/BUILD b/pkg/registry/core/replicationcontroller/BUILD index 2f7a63edf4b..cfc09d1b4d9 100644 --- a/pkg/registry/core/replicationcontroller/BUILD +++ b/pkg/registry/core/replicationcontroller/BUILD @@ -19,11 +19,14 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", "//pkg/apis/core/validation:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/generic:go_default_library", 
"//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage:go_default_library", diff --git a/pkg/registry/core/replicationcontroller/strategy.go b/pkg/registry/core/replicationcontroller/strategy.go index 0cc784bc9fd..0909efc6e7d 100644 --- a/pkg/registry/core/replicationcontroller/strategy.go +++ b/pkg/registry/core/replicationcontroller/strategy.go @@ -24,11 +24,14 @@ import ( "strconv" "strings" + corev1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" apistorage "k8s.io/apiserver/pkg/storage" @@ -49,10 +52,20 @@ type rcStrategy struct { // Strategy is the default logic that applies when creating and updating Replication Controller objects. var Strategy = rcStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -// DefaultGarbageCollectionPolicy returns Orphan because that was the default -// behavior before the server-side garbage collection was implemented. +// DefaultGarbageCollectionPolicy returns OrphanDependents for v1 for backwards compatibility, +// and DeleteDependents for all other versions. 
func (rcStrategy) DefaultGarbageCollectionPolicy(ctx context.Context) rest.GarbageCollectionPolicy { - return rest.OrphanDependents + var groupVersion schema.GroupVersion + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + groupVersion = schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + } + switch groupVersion { + case corev1.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents + } } // NamespaceScoped returns true because all Replication Controllers need to be within a namespace. diff --git a/pkg/registry/storage/volumeattachment/BUILD b/pkg/registry/storage/volumeattachment/BUILD index 7cb2a4bb38d..6156e767673 100644 --- a/pkg/registry/storage/volumeattachment/BUILD +++ b/pkg/registry/storage/volumeattachment/BUILD @@ -13,6 +13,7 @@ go_library( "//pkg/apis/storage:go_default_library", "//pkg/apis/storage/validation:go_default_library", "//staging/src/k8s.io/api/storage/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/pkg/registry/storage/volumeattachment/storage/storage.go b/pkg/registry/storage/volumeattachment/storage/storage.go index df8c470c895..3727e98f54f 100644 --- a/pkg/registry/storage/volumeattachment/storage/storage.go +++ b/pkg/registry/storage/volumeattachment/storage/storage.go @@ -70,6 +70,8 @@ type StatusREST struct { store *genericregistry.Store } +var _ = rest.Patcher(&StatusREST{}) + // New creates a new VolumeAttachment resource func (r *StatusREST) New() runtime.Object { return &storageapi.VolumeAttachment{} diff --git a/pkg/registry/storage/volumeattachment/strategy.go b/pkg/registry/storage/volumeattachment/strategy.go index 
6d37f695c44..1b3a56c51aa 100644 --- a/pkg/registry/storage/volumeattachment/strategy.go +++ b/pkg/registry/storage/volumeattachment/strategy.go @@ -20,6 +20,7 @@ import ( "context" storageapiv1beta1 "k8s.io/api/storage/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" @@ -134,14 +135,5 @@ func (volumeAttachmentStatusStrategy) PrepareForUpdate(ctx context.Context, obj, oldVolumeAttachment := old.(*storage.VolumeAttachment) newVolumeAttachment.Spec = oldVolumeAttachment.Spec - - oldMeta := oldVolumeAttachment.ObjectMeta - newMeta := &newVolumeAttachment.ObjectMeta - newMeta.SetDeletionTimestamp(oldMeta.GetDeletionTimestamp()) - newMeta.SetGeneration(oldMeta.GetGeneration()) - newMeta.SetSelfLink(oldMeta.GetSelfLink()) - newMeta.SetLabels(oldMeta.GetLabels()) - newMeta.SetAnnotations(oldMeta.GetAnnotations()) - newMeta.SetFinalizers(oldMeta.GetFinalizers()) - newMeta.SetOwnerReferences(oldMeta.GetOwnerReferences()) + metav1.ResetObjectMetaForStatus(&newVolumeAttachment.ObjectMeta, &oldVolumeAttachment.ObjectMeta) } diff --git a/pkg/registry/storage/volumeattachment/strategy_test.go b/pkg/registry/storage/volumeattachment/strategy_test.go index e2ec6b4b708..eadde7ccc1c 100644 --- a/pkg/registry/storage/volumeattachment/strategy_test.go +++ b/pkg/registry/storage/volumeattachment/strategy_test.go @@ -113,9 +113,18 @@ func TestVolumeAttachmentStatusStrategy(t *testing.T) { t.Errorf("unexpected objects differerence after modifying status: %v", diff.ObjectDiff(statusVolumeAttachment, expectedVolumeAttachment)) } - // modifying spec should be dropped + // spec and metadata modifications should be dropped newVolumeAttachment := volumeAttachment.DeepCopy() newVolumeAttachment.Spec.NodeName = "valid-node-2" + newVolumeAttachment.Labels = map[string]string{"foo": "bar"} + newVolumeAttachment.Annotations = map[string]string{"foo": "baz"} + 
newVolumeAttachment.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: "v1", + Kind: "Pod", + Name: "Foo", + }, + } StatusStrategy.PrepareForUpdate(ctx, newVolumeAttachment, volumeAttachment) if !apiequality.Semantic.DeepEqual(newVolumeAttachment, volumeAttachment) { diff --git a/pkg/scheduler/BUILD b/pkg/scheduler/BUILD index c3dc753d20d..e12ee333630 100644 --- a/pkg/scheduler/BUILD +++ b/pkg/scheduler/BUILD @@ -20,6 +20,7 @@ go_library( "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", "//pkg/scheduler/metrics:go_default_library", + "//pkg/scheduler/plugins/v1alpha1:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -40,10 +41,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "main_test.go", - "scheduler_test.go", - ], + srcs = ["scheduler_test.go"], embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", @@ -53,11 +51,11 @@ go_test( "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/apis/config:go_default_library", - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/core:go_default_library", "//pkg/scheduler/factory:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/cache/fake:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -93,12 +91,13 @@ filegroup( "//pkg/scheduler/algorithmprovider:all-srcs", "//pkg/scheduler/api:all-srcs", "//pkg/scheduler/apis/config:all-srcs", - "//pkg/scheduler/cache:all-srcs", "//pkg/scheduler/core:all-srcs", "//pkg/scheduler/factory:all-srcs", 
"//pkg/scheduler/internal/cache:all-srcs", "//pkg/scheduler/internal/queue:all-srcs", "//pkg/scheduler/metrics:all-srcs", + "//pkg/scheduler/nodeinfo:all-srcs", + "//pkg/scheduler/plugins:all-srcs", "//pkg/scheduler/testing:all-srcs", "//pkg/scheduler/util:all-srcs", "//pkg/scheduler/volumebinder:all-srcs", diff --git a/pkg/scheduler/algorithm/BUILD b/pkg/scheduler/algorithm/BUILD index 0b19967bafa..4588914f120 100644 --- a/pkg/scheduler/algorithm/BUILD +++ b/pkg/scheduler/algorithm/BUILD @@ -16,8 +16,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm", deps = [ "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", @@ -30,7 +30,7 @@ go_test( srcs = ["types_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", ], diff --git a/pkg/scheduler/algorithm/predicates/BUILD b/pkg/scheduler/algorithm/predicates/BUILD index 58ee7b487cc..88e8a3d63e9 100644 --- a/pkg/scheduler/algorithm/predicates/BUILD +++ b/pkg/scheduler/algorithm/predicates/BUILD @@ -25,7 +25,7 @@ go_library( "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/util:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//pkg/volume/util:go_default_library", @@ -49,7 +49,6 @@ go_test( name = "go_default_test", srcs = [ 
"csi_volume_predicate_test.go", - "main_test.go", "max_attachable_volume_predicate_test.go", "metadata_test.go", "predicates_test.go", @@ -62,7 +61,7 @@ go_test( "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/testing:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go b/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go index ff2215eb28d..86e30b305e3 100644 --- a/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go +++ b/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go @@ -24,7 +24,7 @@ import ( "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/algorithm" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -45,7 +45,7 @@ func NewCSIMaxVolumeLimitPredicate( } func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate( - pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { + pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // if feature gate is disable we return if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) { diff --git a/pkg/scheduler/algorithm/predicates/main_test.go b/pkg/scheduler/algorithm/predicates/main_test.go deleted file mode 100644 index 11bc537373b..00000000000 --- a/pkg/scheduler/algorithm/predicates/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package predicates - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go b/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go index 11434273aa7..708c50ea6cf 100644 --- a/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go +++ b/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/scheduler/algorithm" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -748,7 +748,7 @@ func TestVolumeCountConflicts(t *testing.T) { for _, test := range tests { os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols)) pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName)) - fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulercache.NewNodeInfo(test.existingPods...)) + fits, reasons, err := pred(test.newPod, 
PredicateMetadata(test.newPod, nil), schedulernodeinfo.NewNodeInfo(test.existingPods...)) if err != nil { t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err) } @@ -895,8 +895,8 @@ func TestMaxVolumeFuncM4(t *testing.T) { } } -func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, filter string) *schedulercache.NodeInfo { - nodeInfo := schedulercache.NewNodeInfo(pods...) +func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, filter string) *schedulernodeinfo.NodeInfo { + nodeInfo := schedulernodeinfo.NewNodeInfo(pods...) node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"}, Status: v1.NodeStatus{ diff --git a/pkg/scheduler/algorithm/predicates/metadata.go b/pkg/scheduler/algorithm/predicates/metadata.go index 9284cda2381..dc317572903 100644 --- a/pkg/scheduler/algorithm/predicates/metadata.go +++ b/pkg/scheduler/algorithm/predicates/metadata.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/scheduler/algorithm" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -68,7 +68,7 @@ type topologyPairsMaps struct { type predicateMetadata struct { pod *v1.Pod podBestEffort bool - podRequest *schedulercache.Resource + podRequest *schedulernodeinfo.Resource podPorts []*v1.ContainerPort topologyPairsAntiAffinityPodsMap *topologyPairsMaps @@ -126,7 +126,7 @@ func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.Predic } // GetMetadata returns the predicateMetadata used which will be used by various predicates. 
-func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulercache.NodeInfo) algorithm.PredicateMetadata { +func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata { // If we cannot compute metadata, just return nil if pod == nil { return nil @@ -230,7 +230,7 @@ func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod) error { // AddPod changes predicateMetadata assuming that `newPod` is added to the // system. -func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulercache.NodeInfo) error { +func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error { addedPodFullName := schedutil.GetPodFullName(addedPod) if addedPodFullName == schedutil.GetPodFullName(meta.pod) { return fmt.Errorf("addedPod and meta.pod must not be the same") @@ -359,7 +359,7 @@ func podMatchesAnyAffinityTermProperties(pod *v1.Pod, properties []*affinityTerm // getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node: // (1) Whether it has PodAntiAffinity // (2) Whether any AffinityTerm matches the incoming pod -func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (*topologyPairsMaps, error) { +func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) (*topologyPairsMaps, error) { allNodeNames := make([]string, 0, len(nodeInfoMap)) for name := range nodeInfoMap { allNodeNames = append(allNodeNames, name) @@ -407,7 +407,7 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*s // It returns a topologyPairsMaps that are checked later by the affinity // predicate. With this topologyPairsMaps available, the affinity predicate does not // need to check all the pods in the cluster. 
-func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) { +func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) { affinity := pod.Spec.Affinity if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) { return newTopologyPairsMaps(), newTopologyPairsMaps(), nil diff --git a/pkg/scheduler/algorithm/predicates/metadata_test.go b/pkg/scheduler/algorithm/predicates/metadata_test.go index 7fb1e1f3115..3ab656dc30a 100644 --- a/pkg/scheduler/algorithm/predicates/metadata_test.go +++ b/pkg/scheduler/algorithm/predicates/metadata_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -354,8 +354,8 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) { t.Run(test.name, func(t *testing.T) { allPodLister := schedulertesting.FakePodLister(append(test.existingPods, test.addedPod)) // getMeta creates predicate meta data given the list of pods. - getMeta := func(lister schedulertesting.FakePodLister) (*predicateMetadata, map[string]*schedulercache.NodeInfo) { - nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(lister, test.nodes) + getMeta := func(lister schedulertesting.FakePodLister) (*predicateMetadata, map[string]*schedulernodeinfo.NodeInfo) { + nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(lister, test.nodes) // nodeList is a list of non-pointer nodes to feed to FakeNodeListInfo. 
nodeList := []v1.Node{} for _, n := range test.nodes { @@ -407,7 +407,7 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) { }, }, podBestEffort: true, - podRequest: &schedulercache.Resource{ + podRequest: &schedulernodeinfo.Resource{ MilliCPU: 1000, Memory: 300, AllowedPodNumber: 4, @@ -775,7 +775,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(tt.existingPods, tt.nodes) + nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(tt.existingPods, tt.nodes) gotAffinityPodsMaps, gotAntiAffinityPodsMaps, err := getTPMapMatchingIncomingAffinityAntiAffinity(tt.pod, nodeInfoMap) if (err != nil) != tt.wantErr { diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go index 7594c76405d..3e3bd5010e7 100644 --- a/pkg/scheduler/algorithm/predicates/predicates.go +++ b/pkg/scheduler/algorithm/predicates/predicates.go @@ -43,7 +43,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedutil "k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/kubernetes/pkg/scheduler/volumebinder" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -274,7 +274,7 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool { // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image. // - ISCSI forbids if any two pods share at least same IQN, LUN and Target // TODO: migrate this into some per-volume specific code? 
-func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { for _, v := range pod.Spec.Volumes { for _, ev := range nodeInfo.Pods() { if isVolumeConflict(v, ev) { @@ -447,7 +447,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s return nil } -func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. if len(pod.Spec.Volumes) == 0 { @@ -584,7 +584,7 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum return c.predicate } -func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. 
if len(pod.Spec.Volumes) == 0 { @@ -680,7 +680,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad return true, nil, nil } -// GetResourceRequest returns a *schedulercache.Resource that covers the largest +// GetResourceRequest returns a *schedulernodeinfo.Resource that covers the largest // width in each resource dimension. Because init-containers run sequentially, we collect // the max in each dimension iteratively. In contrast, we sum the resource vectors for // regular containers since they run simultaneously. @@ -704,8 +704,8 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad // Memory: 1G // // Result: CPU: 3, Memory: 3G -func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource { - result := &schedulercache.Resource{} +func GetResourceRequest(pod *v1.Pod) *schedulernodeinfo.Resource { + result := &schedulernodeinfo.Resource{} for _, container := range pod.Spec.Containers { result.Add(container.Resources.Requests) } @@ -725,7 +725,7 @@ func podName(pod *v1.Pod) string { // PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod. // First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the // predicate failure reasons if the node has insufficient resources to run the pod. -func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -740,7 +740,7 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s // No extended resources should be ignored by default. 
ignoredExtendedResources := sets.NewString() - var podRequest *schedulercache.Resource + var podRequest *schedulernodeinfo.Resource if predicateMeta, ok := meta.(*predicateMetadata); ok { podRequest = predicateMeta.podRequest if predicateMeta.ignoredExtendedResources != nil { @@ -850,7 +850,7 @@ func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool { } // PodMatchNodeSelector checks if a pod node selector matches the node label. -func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -862,7 +862,7 @@ func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInf } // PodFitsHost checks if a pod spec node name matches the current node. 
-func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { if len(pod.Spec.NodeName) == 0 { return true, nil, nil } @@ -904,7 +904,7 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful // A node may have a label with "retiring" as key and the date as the value // and it may be desirable to avoid scheduling new pods on this node -func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -989,7 +989,7 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al // // WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed... // For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction. 
-func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var services []*v1.Service var pods []*v1.Pod if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) { @@ -1028,7 +1028,7 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.Predi } // PodFitsHostPorts checks if a node has free ports for the requested pod ports. -func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var wantPorts []*v1.ContainerPort if predicateMeta, ok := meta.(*predicateMetadata); ok { wantPorts = predicateMeta.podPorts @@ -1068,7 +1068,7 @@ func haveOverlap(a1, a2 []string) bool { // GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. 
noncriticalPredicates are the predicates // that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need -func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := noncriticalPredicates(pod, meta, nodeInfo) if err != nil { @@ -1090,7 +1090,7 @@ func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo * } // noncriticalPredicates are the predicates that only non-critical pods need -func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := PodFitsResources(pod, meta, nodeInfo) if err != nil { @@ -1104,7 +1104,7 @@ func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeIn } // EssentialPredicates are the predicates that all pods, including critical pods, need -func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := PodFitsHost(pod, meta, nodeInfo) if err != nil { @@ -1152,7 +1152,7 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algor 
// InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration. // First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the // predicate failure reasons if the pod cannot be scheduled on the specified node. -func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -1186,7 +1186,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm // targetPod matches all the terms and their topologies, 2) whether targetPod // matches all the terms label selector and namespaces (AKA term properties), // 3) any error. -func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) { +func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) { if len(terms) == 0 { return false, false, fmt.Errorf("terms array is empty") } @@ -1290,7 +1290,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1. // Checks if scheduling the pod onto this node would break any anti-affinity // terms indicated by the existing pods. 
-func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (algorithm.PredicateFailureReason, error) { +func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil") @@ -1333,7 +1333,7 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta // nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches // topology of all the "terms" for the given "pod". -func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool { +func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) bool { node := nodeInfo.Node() for _, term := range terms { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { @@ -1350,7 +1350,7 @@ func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPa // nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches // topology of any "term" for the given "pod". 
-func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool { +func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) bool { node := nodeInfo.Node() for _, term := range terms { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { @@ -1365,7 +1365,7 @@ func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPai // Checks if scheduling the pod onto this node would break any term of this pod. func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, - meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo, + meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo, affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { @@ -1466,7 +1466,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, } // CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec. 
-func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { if nodeInfo == nil || nodeInfo.Node() == nil { return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil } @@ -1486,7 +1486,7 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetada } // PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints -func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { if nodeInfo == nil || nodeInfo.Node() == nil { return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil } @@ -1498,13 +1498,13 @@ func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeI } // PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints -func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool { return t.Effect == v1.TaintEffectNoExecute }) } -func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) { +func podToleratesNodeTaints(pod *v1.Pod, 
nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) { taints, err := nodeInfo.Taints() if err != nil { return false, nil, err @@ -1523,7 +1523,7 @@ func isPodBestEffort(pod *v1.Pod) bool { // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node // reporting memory pressure condition. -func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var podBestEffort bool if predicateMeta, ok := meta.(*predicateMetadata); ok { podBestEffort = predicateMeta.podBestEffort @@ -1545,7 +1545,7 @@ func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetad // CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node // reporting disk pressure condition. -func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // check if node is under disk pressure if nodeInfo.DiskPressureCondition() == v1.ConditionTrue { return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil @@ -1555,7 +1555,7 @@ func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadat // CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node // reporting pid pressure condition. 
-func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // check if node is under pid pressure if nodeInfo.PIDPressureCondition() == v1.ConditionTrue { return false, []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}, nil @@ -1565,7 +1565,7 @@ func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata // CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting out of disk, // network unavailable and not ready condition. Only node conditions are accounted in this predicate. -func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { reasons := []algorithm.PredicateFailureReason{} if nodeInfo == nil || nodeInfo.Node() == nil { @@ -1617,7 +1617,7 @@ func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) algorithm.FitP return c.predicate } -func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { return true, nil, nil } diff --git a/pkg/scheduler/algorithm/predicates/predicates_test.go b/pkg/scheduler/algorithm/predicates/predicates_test.go index 5a752901060..39f6e83dc76 100644 --- 
a/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -36,7 +36,7 @@ import ( kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -72,7 +72,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa } } -func newResourcePod(usage ...schedulercache.Resource) *v1.Pod { +func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod { containers := []v1.Container{} for _, req := range usage { containers = append(containers, v1.Container{ @@ -86,12 +86,12 @@ func newResourcePod(usage ...schedulercache.Resource) *v1.Pod { } } -func newResourceInitPod(pod *v1.Pod, usage ...schedulercache.Resource) *v1.Pod { +func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Pod { pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers return pod } -func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulercache.NodeInfo) algorithm.PredicateMetadata { +func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata { pm := PredicateMetadataFactory{schedulertesting.FakePodLister{p}} return pm.GetMetadata(p, nodeInfo) } @@ -99,7 +99,7 @@ func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulercache.NodeInfo) func TestPodFitsResources(t *testing.T) { enoughPodsTests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo fits bool name string reasons []algorithm.PredicateFailureReason @@ -107,15 +107,15 @@ func TestPodFitsResources(t *testing.T) { }{ { pod: &v1.Pod{}, - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})), + 
nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})), fits: true, name: "no resources requested always fits", }, { - pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})), + pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})), fits: false, name: "too many resources fails", reasons: []algorithm.PredicateFailureReason{ @@ -124,234 +124,234 @@ func TestPodFitsResources(t *testing.T) { }, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})), fits: false, name: "too many resources fails due to init container cpu", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}, schedulercache.Resource{MilliCPU: 2, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 
8, Memory: 19})), fits: false, name: "too many resources fails due to highest init container cpu", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), fits: false, name: "too many resources fails due to init container memory", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}, schedulercache.Resource{MilliCPU: 1, Memory: 2}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), fits: false, name: "too many resources fails due to highest init container memory", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), + pod: 
newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), fits: true, name: "init container fits because it's the max, not sum, of containers and init containers", }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}, schedulercache.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), fits: true, name: "multiple init containers fit because it's the max, not sum, of containers and init containers", }, { - pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})), + pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})), fits: true, name: "both resources fit", }, { - pod: newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 5})), + pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})), fits: false, name: "one resource memory fits", reasons: 
[]algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)}, }, { - pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 2}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), fits: false, name: "one resource cpu fits", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)}, }, { - pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), fits: true, name: "equal edge case", }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 4, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 4, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), fits: true, name: "equal edge case for init container", }, { - pod: newResourcePod(schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), - nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})), + pod: newResourcePod(schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), + nodeInfo: 
schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})), fits: true, name: "extended resource fits", }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), - nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), + nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})), fits: true, name: "extended resource fits for init container", }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), fits: false, name: "extended resource capacity enforced", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: 
map[v1.ResourceName]int64{extendedResourceA: 10}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), fits: false, name: "extended resource capacity enforced for init container", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)}, }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), fits: false, name: "extended resource allocatable enforced", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), fits: false, name: "extended resource allocatable enforced for 
init container", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)}, }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), fits: false, name: "extended resource allocatable enforced for multiple containers", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), + nodeInfo: 
schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), fits: true, name: "extended resource allocatable admits multiple init containers", }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}}, - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}}, + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), fits: false, name: "extended resource allocatable enforced for multiple init containers", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)}, }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), fits: false, name: "extended resource allocatable enforced for unknown resource", reasons: 
[]algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), fits: false, name: "extended resource allocatable enforced for unknown resource for init container", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)}, }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), fits: false, name: "kubernetes.io resource capacity enforced", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 
1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), fits: false, name: "kubernetes.io resource capacity enforced for init container", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)}, }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), fits: false, name: "hugepages resource capacity enforced", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), fits: false, name: "hugepages resource capacity enforced for init container", reasons: 
[]algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)}, }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}, - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}, + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})), fits: false, name: "hugepages resource allocatable enforced for multiple containers", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)}, }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), + schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), fits: true, ignoredExtendedResources: sets.NewString(string(extendedResourceB)), name: "skip checking ignored extended resource", @@ -379,39 +379,39 @@ func TestPodFitsResources(t *testing.T) { notEnoughPodsTests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo fits bool name string reasons 
[]algorithm.PredicateFailureReason }{ { pod: &v1.Pod{}, - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})), fits: false, name: "even without specified resources predicate fails when there's no space for additional pod", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, }, { - pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})), + pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})), fits: false, name: "even if both resources fit predicate fails when there's no space for additional pod", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, }, { - pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), fits: false, name: "even for equal edge case predicate fails when there's no space for additional pod", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, 
Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), fits: false, name: "even for equal edge case predicate fails when there's no space for additional pod due to init container", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, @@ -436,15 +436,15 @@ func TestPodFitsResources(t *testing.T) { storagePodsTests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo fits bool name string reasons []algorithm.PredicateFailureReason }{ { - pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 10})), + pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})), fits: false, name: "due to container scratch disk", reasons: []algorithm.PredicateFailureReason{ @@ -452,16 +452,16 @@ func TestPodFitsResources(t *testing.T) { }, }, { - pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 10})), + pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 10})), fits: true, name: "pod fit", }, { - pod: newResourcePod(schedulercache.Resource{EphemeralStorage: 25}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2})), + pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 25}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})), fits: false, name: "storage 
ephemeral local storage request exceeds allocatable", reasons: []algorithm.PredicateFailureReason{ @@ -469,9 +469,9 @@ func TestPodFitsResources(t *testing.T) { }, }, { - pod: newResourcePod(schedulercache.Resource{EphemeralStorage: 10}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2})), + pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 10}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})), fits: true, name: "pod fits", }, @@ -542,7 +542,7 @@ func TestPodFitsHost(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(test.node) fits, reasons, err := PodFitsHost(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) if err != nil { @@ -585,96 +585,96 @@ func newPod(host string, hostPortInfos ...string) *v1.Pod { func TestPodFitsHostPorts(t *testing.T) { tests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo fits bool name string }{ { pod: &v1.Pod{}, - nodeInfo: schedulercache.NewNodeInfo(), + nodeInfo: schedulernodeinfo.NewNodeInfo(), fits: true, name: "nothing running", }, { pod: newPod("m1", "UDP/127.0.0.1/8080"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "UDP/127.0.0.1/9090")), fits: true, name: "other port", }, { pod: newPod("m1", "UDP/127.0.0.1/8080"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "UDP/127.0.0.1/8080")), fits: false, name: "same udp port", }, { pod: newPod("m1", "TCP/127.0.0.1/8080"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "TCP/127.0.0.1/8080")), fits: false, name: "same tcp port", }, { pod: newPod("m1", "TCP/127.0.0.1/8080"), - nodeInfo: schedulercache.NewNodeInfo( + 
nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "TCP/127.0.0.2/8080")), fits: true, name: "different host ip", }, { pod: newPod("m1", "UDP/127.0.0.1/8080"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "TCP/127.0.0.1/8080")), fits: true, name: "different protocol", }, { pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "UDP/127.0.0.1/8080")), fits: false, name: "second udp port conflict", }, { pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")), fits: false, name: "first tcp port conflict", }, { pod: newPod("m1", "TCP/0.0.0.0/8001"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "TCP/127.0.0.1/8001")), fits: false, name: "first tcp port conflict due to 0.0.0.0 hostIP", }, { pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "TCP/127.0.0.1/8001")), fits: false, name: "TCP hostPort conflict due to 0.0.0.0 hostIP", }, { pod: newPod("m1", "TCP/127.0.0.1/8001"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "TCP/0.0.0.0/8001")), fits: false, name: "second tcp port conflict to 0.0.0.0 hostIP", }, { pod: newPod("m1", "UDP/127.0.0.1/8001"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "TCP/0.0.0.0/8001")), fits: true, name: "second different protocol", }, { pod: newPod("m1", "UDP/127.0.0.1/8001"), - nodeInfo: schedulercache.NewNodeInfo( + nodeInfo: schedulernodeinfo.NewNodeInfo( newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")), fits: false, name: "UDP hostPort conflict due to 0.0.0.0 hostIP", @@ -723,14 
+723,14 @@ func TestGCEDiskConflicts(t *testing.T) { } tests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo isOk bool name string }{ - {&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, - {&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, - {&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, - {&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, + {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing"}, + {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, + {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, + {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, } expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} @@ -778,14 +778,14 @@ func TestAWSDiskConflicts(t *testing.T) { } tests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo isOk bool name string }{ - {&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, - {&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, - {&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, - {&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, + {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing"}, + {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, + {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, + {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, } expectedFailureReasons := 
[]algorithm.PredicateFailureReason{ErrDiskConflict} @@ -839,14 +839,14 @@ func TestRBDDiskConflicts(t *testing.T) { } tests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo isOk bool name string }{ - {&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, - {&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, - {&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, - {&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, + {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing"}, + {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, + {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, + {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, } expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} @@ -900,14 +900,14 @@ func TestISCSIDiskConflicts(t *testing.T) { } tests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo isOk bool name string }{ - {&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, - {&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, - {&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, - {&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, + {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing"}, + {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, + {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, + {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, } 
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} @@ -1611,7 +1611,7 @@ func TestPodFitsSelector(t *testing.T) { Name: test.nodeName, Labels: test.labels, }} - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(&node) fits, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) @@ -1679,7 +1679,7 @@ func TestNodeLabelPresence(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: label}} - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(&node) labelChecker := NodeLabelChecker{test.labels, test.presence} @@ -1828,9 +1828,9 @@ func TestServiceAffinity(t *testing.T) { testIt := func(skipPrecompute bool) { t.Run(fmt.Sprintf("%v/skipPrecompute/%v", test.name, skipPrecompute), func(t *testing.T) { nodes := []v1.Node{node1, node2, node3, node4, node5} - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(test.node) - nodeInfoMap := map[string]*schedulercache.NodeInfo{test.node.Name: nodeInfo} + nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo} // Reimplementing the logic that the scheduler implements: Any time it makes a predicate, it registers any precomputations. predicate, precompute := NewServiceAffinityPredicate(schedulertesting.FakePodLister(test.pods), schedulertesting.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels) // Register a precomputation or Rewrite the precomputation to a no-op, depending on the state we want to test. 
@@ -1880,7 +1880,7 @@ func newPodWithPort(hostPorts ...int) *v1.Pod { func TestRunGeneralPredicates(t *testing.T) { resourceTests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo node *v1.Node fits bool name string @@ -1889,8 +1889,8 @@ func TestRunGeneralPredicates(t *testing.T) { }{ { pod: &v1.Pod{}, - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "machine1"}, Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)}, @@ -1900,9 +1900,9 @@ func TestRunGeneralPredicates(t *testing.T) { name: "no resources/port/host requested always fits", }, { - pod: newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 10}), - nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 10}), + nodeInfo: schedulernodeinfo.NewNodeInfo( + newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "machine1"}, Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)}, @@ -1921,7 +1921,7 @@ func TestRunGeneralPredicates(t *testing.T) { NodeName: "machine2", }, }, - nodeInfo: schedulercache.NewNodeInfo(), + nodeInfo: schedulernodeinfo.NewNodeInfo(), node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "machine1"}, Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)}, @@ -1933,7 +1933,7 @@ func TestRunGeneralPredicates(t *testing.T) { }, { pod: newPodWithPort(123), - nodeInfo: 
schedulercache.NewNodeInfo(newPodWithPort(123)), + nodeInfo: schedulernodeinfo.NewNodeInfo(newPodWithPort(123)), node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "machine1"}, Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)}, @@ -2908,9 +2908,9 @@ func TestInterPodAffinity(t *testing.T) { info: FakeNodeInfo(*node), podLister: schedulertesting.FakePodLister(test.pods), } - nodeInfo := schedulercache.NewNodeInfo(podsOnNode...) + nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...) nodeInfo.SetNode(test.node) - nodeInfoMap := map[string]*schedulercache.NodeInfo{test.node.Name: nodeInfo} + nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo} fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo) if !fits && !reflect.DeepEqual(reasons, test.expectFailureReasons) { t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.expectFailureReasons) @@ -4003,7 +4003,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { for indexTest, test := range tests { t.Run(test.name, func(t *testing.T) { nodeListInfo := FakeNodeListInfo(test.nodes) - nodeInfoMap := make(map[string]*schedulercache.NodeInfo) + nodeInfoMap := make(map[string]*schedulernodeinfo.NodeInfo) for i, node := range test.nodes { var podsOnNode []*v1.Pod for _, pod := range test.pods { @@ -4012,7 +4012,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { } } - nodeInfo := schedulercache.NewNodeInfo(podsOnNode...) + nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...) 
nodeInfo.SetNode(&test.nodes[i]) nodeInfoMap[node.Name] = nodeInfo } @@ -4034,9 +4034,9 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { } affinity := test.pod.Spec.Affinity if affinity != nil && affinity.NodeAffinity != nil { - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(&node) - nodeInfoMap := map[string]*schedulercache.NodeInfo{node.Name: nodeInfo} + nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{node.Name: nodeInfo} fits2, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo) if err != nil { t.Errorf("unexpected error: %v", err) @@ -4242,7 +4242,7 @@ func TestPodToleratesTaints(t *testing.T) { for _, test := range podTolerateTaintsTests { t.Run(test.name, func(t *testing.T) { - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(&test.node) fits, reasons, err := PodToleratesNodeTaints(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) if err != nil { @@ -4258,8 +4258,8 @@ func TestPodToleratesTaints(t *testing.T) { } } -func makeEmptyNodeInfo(node *v1.Node) *schedulercache.NodeInfo { - nodeInfo := schedulercache.NewNodeInfo() +func makeEmptyNodeInfo(node *v1.Node) *schedulernodeinfo.NodeInfo { + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(node) return nodeInfo } @@ -4323,7 +4323,7 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) { tests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo fits bool name string }{ @@ -4409,7 +4409,7 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) { tests := []struct { pod *v1.Pod - nodeInfo *schedulercache.NodeInfo + nodeInfo *schedulernodeinfo.NodeInfo fits bool name string }{ @@ -4471,7 +4471,7 @@ func TestPodSchedulesOnNodeWithPIDPressureCondition(t *testing.T) { } tests := []struct { - nodeInfo *schedulercache.NodeInfo + nodeInfo 
*schedulernodeinfo.NodeInfo fits bool name string }{ @@ -4708,7 +4708,7 @@ func TestVolumeZonePredicate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { fit := NewVolumeZonePredicate(pvInfo, pvcInfo, nil) - node := &schedulercache.NodeInfo{} + node := &schedulernodeinfo.NodeInfo{} node.SetNode(test.Node) fits, reasons, err := fit(test.Pod, nil, node) @@ -4802,7 +4802,7 @@ func TestVolumeZonePredicateMultiZone(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { fit := NewVolumeZonePredicate(pvInfo, pvcInfo, nil) - node := &schedulercache.NodeInfo{} + node := &schedulernodeinfo.NodeInfo{} node.SetNode(test.Node) fits, reasons, err := fit(test.Pod, nil, node) @@ -4920,7 +4920,7 @@ func TestVolumeZonePredicateWithVolumeBinding(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { fit := NewVolumeZonePredicate(pvInfo, pvcInfo, classInfo) - node := &schedulercache.NodeInfo{} + node := &schedulernodeinfo.NodeInfo{} node.SetNode(test.Node) fits, _, err := fit(test.Pod, nil, node) @@ -5028,7 +5028,7 @@ func TestCheckNodeUnschedulablePredicate(t *testing.T) { } for _, test := range testCases { - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(test.node) fit, _, err := CheckNodeUnschedulablePredicate(test.pod, nil, nodeInfo) if err != nil { diff --git a/pkg/scheduler/algorithm/predicates/utils.go b/pkg/scheduler/algorithm/predicates/utils.go index de2826a4df4..4080baf91ee 100644 --- a/pkg/scheduler/algorithm/predicates/utils.go +++ b/pkg/scheduler/algorithm/predicates/utils.go @@ -19,7 +19,7 @@ package predicates import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // FindLabelsInSet gets as many key/value pairs as possible out of a label set. 
@@ -68,7 +68,7 @@ func CreateSelectorFromLabels(aL map[string]string) labels.Selector { // portsConflict check whether existingPorts and wantPorts conflict with each other // return true if we have a conflict -func portsConflict(existingPorts schedulercache.HostPortInfo, wantPorts []*v1.ContainerPort) bool { +func portsConflict(existingPorts schedulernodeinfo.HostPortInfo, wantPorts []*v1.ContainerPort) bool { for _, cp := range wantPorts { if existingPorts.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) { return true diff --git a/pkg/scheduler/algorithm/priorities/BUILD b/pkg/scheduler/algorithm/priorities/BUILD index e8a08011f35..5a4314299ff 100644 --- a/pkg/scheduler/algorithm/priorities/BUILD +++ b/pkg/scheduler/algorithm/priorities/BUILD @@ -34,7 +34,7 @@ go_library( "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/parsers:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -55,7 +55,6 @@ go_test( "image_locality_test.go", "interpod_affinity_test.go", "least_requested_test.go", - "main_test.go", "metadata_test.go", "most_requested_test.go", "node_affinity_test.go", @@ -72,7 +71,7 @@ go_test( "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/testing:go_default_library", "//pkg/util/parsers:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", diff --git a/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go index c77dd399d5a..97635cddddc 100644 --- 
a/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go +++ b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go @@ -22,7 +22,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) var ( @@ -38,7 +38,7 @@ var ( BalancedResourceAllocationMap = balancedResourcePriority.PriorityMap ) -func balancedResourceScorer(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { +func balancedResourceScorer(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { cpuFraction := fractionOfCapacity(requested.MilliCPU, allocable.MilliCPU) memoryFraction := fractionOfCapacity(requested.Memory, allocable.Memory) // This to find a node which has most balanced CPU, memory and volume usage. diff --git a/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go index ccbfe45959b..07dcd733179 100644 --- a/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go +++ b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go @@ -27,7 +27,7 @@ import ( utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/kubernetes/pkg/features" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // getExistingVolumeCountForNode gets the current number of volumes on node. 
@@ -401,7 +401,7 @@ func TestBalancedResourceAllocation(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes) if len(test.pod.Spec.Volumes) > 0 { maxVolumes := 5 for _, info := range nodeNameToInfo { diff --git a/pkg/scheduler/algorithm/priorities/image_locality.go b/pkg/scheduler/algorithm/priorities/image_locality.go index 041e52d4fce..cc1db725ad9 100644 --- a/pkg/scheduler/algorithm/priorities/image_locality.go +++ b/pkg/scheduler/algorithm/priorities/image_locality.go @@ -22,7 +22,7 @@ import ( "k8s.io/api/core/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/pkg/util/parsers" ) @@ -39,7 +39,7 @@ const ( // based on the total size of those images. // - If none of the images are present, this node will be given the lowest priority. // - If some of the images are present on a node, the larger their sizes' sum, the higher the node's priority. -func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") @@ -74,7 +74,7 @@ func calculatePriority(sumScores int64) int { // sumImageScores returns the sum of image scores of all the containers that are already on the node. // Each image receives a raw score of its size, scaled by scaledImageScore. The raw scores are later used to calculate // the final score. Note that the init containers are not considered for it's rare for users to deploy huge init containers. 
-func sumImageScores(nodeInfo *schedulercache.NodeInfo, containers []v1.Container, totalNumNodes int) int64 { +func sumImageScores(nodeInfo *schedulernodeinfo.NodeInfo, containers []v1.Container, totalNumNodes int) int64 { var sum int64 imageStates := nodeInfo.ImageStates() @@ -91,7 +91,7 @@ func sumImageScores(nodeInfo *schedulercache.NodeInfo, containers []v1.Container // The size of the image is used as the base score, scaled by a factor which considers how much nodes the image has "spread" to. // This heuristic aims to mitigate the undesirable "node heating problem", i.e., pods get assigned to the same or // a few nodes due to image locality. -func scaledImageScore(imageState *schedulercache.ImageStateSummary, totalNumNodes int) int64 { +func scaledImageScore(imageState *schedulernodeinfo.ImageStateSummary, totalNumNodes int) int64 { spread := float64(imageState.NumNodes) / float64(totalNumNodes) return int64(float64(imageState.Size) * spread) } diff --git a/pkg/scheduler/algorithm/priorities/image_locality_test.go b/pkg/scheduler/algorithm/priorities/image_locality_test.go index 58dc02a2335..55c0aa546a8 100644 --- a/pkg/scheduler/algorithm/priorities/image_locality_test.go +++ b/pkg/scheduler/algorithm/priorities/image_locality_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/pkg/util/parsers" ) @@ -164,7 +164,7 @@ func TestImageLocalityPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes) list, err := priorityFunction(ImageLocalityPriorityMap, nil, &priorityMetadata{totalNumNodes: len(test.nodes)})(test.pod, 
nodeNameToInfo, test.nodes) if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/pkg/scheduler/algorithm/priorities/interpod_affinity.go index 32cf27c83bf..d18397446a5 100644 --- a/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -28,7 +28,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/klog" ) @@ -116,7 +116,7 @@ func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm // that node; the node(s) with the highest sum are the most preferred. // Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity, // symmetry need to be considered for hard requirements from podAffinity -func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { +func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { affinity := pod.Spec.Affinity hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil diff --git a/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go b/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go index 7f86006d56a..582e73d9c24 100644 --- a/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go +++ b/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -510,7 +510,7 @@ func TestInterPodAffinityPriority(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes) interPodAffinity := InterPodAffinity{ info: FakeNodeListInfo(test.nodes), nodeLister: schedulertesting.FakeNodeLister(test.nodes), @@ -600,7 +600,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes) ipa := InterPodAffinity{ info: FakeNodeListInfo(test.nodes), nodeLister: schedulertesting.FakeNodeLister(test.nodes), diff --git a/pkg/scheduler/algorithm/priorities/least_requested.go b/pkg/scheduler/algorithm/priorities/least_requested.go index d691810896d..e469ee50356 100644 --- a/pkg/scheduler/algorithm/priorities/least_requested.go +++ b/pkg/scheduler/algorithm/priorities/least_requested.go @@ -18,7 +18,7 @@ package priorities import ( schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) var ( @@ -33,7 +33,7 @@ var ( LeastRequestedPriorityMap = leastResourcePriority.PriorityMap ) -func leastResourceScorer(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { +func leastResourceScorer(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, 
requestedVolumes int, allocatableVolumes int) int64 { return (leastRequestedScore(requested.MilliCPU, allocable.MilliCPU) + leastRequestedScore(requested.Memory, allocable.Memory)) / 2 } diff --git a/pkg/scheduler/algorithm/priorities/least_requested_test.go b/pkg/scheduler/algorithm/priorities/least_requested_test.go index ca9d569bfc9..08a3e3be09e 100644 --- a/pkg/scheduler/algorithm/priorities/least_requested_test.go +++ b/pkg/scheduler/algorithm/priorities/least_requested_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) func TestLeastRequested(t *testing.T) { @@ -253,7 +253,7 @@ func TestLeastRequested(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes) list, err := priorityFunction(LeastRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/scheduler/algorithm/priorities/main_test.go b/pkg/scheduler/algorithm/priorities/main_test.go deleted file mode 100644 index 8ca621ecd52..00000000000 --- a/pkg/scheduler/algorithm/priorities/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package priorities - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/scheduler/algorithm/priorities/metadata.go b/pkg/scheduler/algorithm/priorities/metadata.go index 0771cdca83d..b860c496959 100644 --- a/pkg/scheduler/algorithm/priorities/metadata.go +++ b/pkg/scheduler/algorithm/priorities/metadata.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/scheduler/algorithm" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // PriorityMetadataFactory is a factory to produce PriorityMetadata. @@ -45,7 +45,7 @@ func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controlle // priorityMetadata is a type that is passed as metadata for priority functions type priorityMetadata struct { - nonZeroRequest *schedulercache.Resource + nonZeroRequest *schedulernodeinfo.Resource podTolerations []v1.Toleration affinity *v1.Affinity podSelectors []labels.Selector @@ -55,7 +55,7 @@ type priorityMetadata struct { } // PriorityMetadata is a PriorityMetadataProducer. Node info can be nil. 
-func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { +func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} { // If we cannot compute metadata, just return nil if pod == nil { return nil diff --git a/pkg/scheduler/algorithm/priorities/metadata_test.go b/pkg/scheduler/algorithm/priorities/metadata_test.go index a778df6f649..9a992b5797f 100644 --- a/pkg/scheduler/algorithm/priorities/metadata_test.go +++ b/pkg/scheduler/algorithm/priorities/metadata_test.go @@ -25,16 +25,16 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) func TestPriorityMetadata(t *testing.T) { - nonZeroReqs := &schedulercache.Resource{} + nonZeroReqs := &schedulernodeinfo.Resource{} nonZeroReqs.MilliCPU = priorityutil.DefaultMilliCPURequest nonZeroReqs.Memory = priorityutil.DefaultMemoryRequest - specifiedReqs := &schedulercache.Resource{} + specifiedReqs := &schedulernodeinfo.Resource{} specifiedReqs.MilliCPU = 200 specifiedReqs.Memory = 2000 diff --git a/pkg/scheduler/algorithm/priorities/most_requested.go b/pkg/scheduler/algorithm/priorities/most_requested.go index f1cc7c6ad5c..ef9dd3a7283 100644 --- a/pkg/scheduler/algorithm/priorities/most_requested.go +++ b/pkg/scheduler/algorithm/priorities/most_requested.go @@ -18,7 +18,7 @@ package priorities import ( schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) var ( @@ -31,7 +31,7 @@ var ( MostRequestedPriorityMap = mostResourcePriority.PriorityMap ) -func 
mostResourceScorer(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { +func mostResourceScorer(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { return (mostRequestedScore(requested.MilliCPU, allocable.MilliCPU) + mostRequestedScore(requested.Memory, allocable.Memory)) / 2 } diff --git a/pkg/scheduler/algorithm/priorities/most_requested_test.go b/pkg/scheduler/algorithm/priorities/most_requested_test.go index 94262d32cbc..af53ef47424 100644 --- a/pkg/scheduler/algorithm/priorities/most_requested_test.go +++ b/pkg/scheduler/algorithm/priorities/most_requested_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) func TestMostRequested(t *testing.T) { @@ -210,7 +210,7 @@ func TestMostRequested(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes) list, err := priorityFunction(MostRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/scheduler/algorithm/priorities/node_affinity.go b/pkg/scheduler/algorithm/priorities/node_affinity.go index e274e170921..f9db3fec117 100644 --- a/pkg/scheduler/algorithm/priorities/node_affinity.go +++ b/pkg/scheduler/algorithm/priorities/node_affinity.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/labels" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + 
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // CalculateNodeAffinityPriorityMap prioritizes nodes according to node affinity scheduling preferences @@ -31,7 +31,7 @@ import ( // it will a get an add of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms // the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher // score the node gets. -func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") diff --git a/pkg/scheduler/algorithm/priorities/node_affinity_test.go b/pkg/scheduler/algorithm/priorities/node_affinity_test.go index 8ba1cb1e8c0..6425047df46 100644 --- a/pkg/scheduler/algorithm/priorities/node_affinity_test.go +++ b/pkg/scheduler/algorithm/priorities/node_affinity_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) func TestNodeAffinityPriority(t *testing.T) { @@ -167,7 +167,7 @@ func TestNodeAffinityPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes) nap := priorityFunction(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce, nil) list, err := nap(test.pod, nodeNameToInfo, test.nodes) if err != nil { diff --git a/pkg/scheduler/algorithm/priorities/node_label.go b/pkg/scheduler/algorithm/priorities/node_label.go index 
82505f788ac..c3ddb1af51a 100644 --- a/pkg/scheduler/algorithm/priorities/node_label.go +++ b/pkg/scheduler/algorithm/priorities/node_label.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // NodeLabelPrioritizer contains information to calculate node label priority. @@ -44,7 +44,7 @@ func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFun // CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value. // If presence is true, prioritizes nodes that have the specified label, regardless of value. // If presence is false, prioritizes nodes that do not have the specified label. -func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") diff --git a/pkg/scheduler/algorithm/priorities/node_label_test.go b/pkg/scheduler/algorithm/priorities/node_label_test.go index f713d02f34a..0bde8e100a9 100644 --- a/pkg/scheduler/algorithm/priorities/node_label_test.go +++ b/pkg/scheduler/algorithm/priorities/node_label_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) func TestNewNodeLabelPriority(t *testing.T) { @@ -108,7 +108,7 @@ func TestNewNodeLabelPriority(t *testing.T) { for _, test 
:= range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes) labelPrioritizer := &NodeLabelPrioritizer{ label: test.label, presence: test.presence, diff --git a/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go index 810face510b..8af4ce15c04 100644 --- a/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go +++ b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go @@ -23,12 +23,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // CalculateNodePreferAvoidPodsPriorityMap priorities nodes according to the node annotation // "scheduler.alpha.kubernetes.io/preferAvoidPods". 
-func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") diff --git a/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go index c9b8c9365e6..b7a2059309e 100644 --- a/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go +++ b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) func TestNodePreferAvoidPriority(t *testing.T) { @@ -142,7 +142,7 @@ func TestNodePreferAvoidPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes) list, err := priorityFunction(CalculateNodePreferAvoidPodsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/scheduler/algorithm/priorities/reduce.go b/pkg/scheduler/algorithm/priorities/reduce.go index 6a6a7427f1d..aaeb9add87b 100644 --- a/pkg/scheduler/algorithm/priorities/reduce.go +++ b/pkg/scheduler/algorithm/priorities/reduce.go @@ -20,7 +20,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo 
"k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // NormalizeReduce generates a PriorityReduceFunction that can normalize the result @@ -30,7 +30,7 @@ func NormalizeReduce(maxPriority int, reverse bool) algorithm.PriorityReduceFunc return func( _ *v1.Pod, _ interface{}, - _ map[string]*schedulercache.NodeInfo, + _ map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error { var maxCount int diff --git a/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio.go b/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio.go index a6ac7a837e8..9337404dd75 100644 --- a/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio.go +++ b/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio.go @@ -20,7 +20,7 @@ import ( "fmt" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // FunctionShape represents shape of scoring function. 
@@ -98,7 +98,7 @@ func RequestedToCapacityRatioResourceAllocationPriority(scoringFunctionShape Fun return &ResourceAllocationPriority{"RequestedToCapacityRatioResourceAllocationPriority", buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape)} } -func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionShape) func(*schedulercache.Resource, *schedulercache.Resource, bool, int, int) int64 { +func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionShape) func(*schedulernodeinfo.Resource, *schedulernodeinfo.Resource, bool, int, int) int64 { rawScoringFunction := buildBrokenLinearFunction(scoringFunctionShape) resourceScoringFunction := func(requested, capacity int64) int64 { @@ -109,7 +109,7 @@ func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionSh return rawScoringFunction(maxUtilization - (capacity-requested)*maxUtilization/capacity) } - return func(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { + return func(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { cpuScore := resourceScoringFunction(requested.MilliCPU, allocable.MilliCPU) memoryScore := resourceScoringFunction(requested.Memory, allocable.Memory) return (cpuScore + memoryScore) / 2 diff --git a/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio_test.go b/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio_test.go index 0d360c8d444..9e520d61e5f 100644 --- a/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio_test.go +++ b/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) 
func TestCreatingFunctionShapeErrorsIfEmptyPoints(t *testing.T) { @@ -229,7 +229,7 @@ func TestRequestedToCapacityRatio(t *testing.T) { newPod := buildResourcesPod("", test.requested) - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(scheduledPods, nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(scheduledPods, nodes) list, err := priorityFunction(RequestedToCapacityRatioResourceAllocationPriorityDefault().PriorityMap, nil, nil)(newPod, nodeNameToInfo, nodes) if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/scheduler/algorithm/priorities/resource_allocation.go b/pkg/scheduler/algorithm/priorities/resource_allocation.go index 027eabae5ff..fea2e680697 100644 --- a/pkg/scheduler/algorithm/priorities/resource_allocation.go +++ b/pkg/scheduler/algorithm/priorities/resource_allocation.go @@ -25,13 +25,13 @@ import ( "k8s.io/kubernetes/pkg/features" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // ResourceAllocationPriority contains information to calculate resource allocation priority. type ResourceAllocationPriority struct { Name string - scorer func(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 + scorer func(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 } // PriorityMap priorities nodes according to the resource allocations on the node. 
@@ -39,14 +39,14 @@ type ResourceAllocationPriority struct { func (r *ResourceAllocationPriority) PriorityMap( pod *v1.Pod, meta interface{}, - nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { + nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") } allocatable := nodeInfo.AllocatableResource() - var requested schedulercache.Resource + var requested schedulernodeinfo.Resource if priorityMeta, ok := meta.(*priorityMetadata); ok { requested = *priorityMeta.nonZeroRequest } else { @@ -91,8 +91,8 @@ func (r *ResourceAllocationPriority) PriorityMap( }, nil } -func getNonZeroRequests(pod *v1.Pod) *schedulercache.Resource { - result := &schedulercache.Resource{} +func getNonZeroRequests(pod *v1.Pod) *schedulernodeinfo.Resource { + result := &schedulernodeinfo.Resource{} for i := range pod.Spec.Containers { container := &pod.Spec.Containers[i] cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests) diff --git a/pkg/scheduler/algorithm/priorities/resource_limits.go b/pkg/scheduler/algorithm/priorities/resource_limits.go index 82b803cbf72..1344dc1eec4 100644 --- a/pkg/scheduler/algorithm/priorities/resource_limits.go +++ b/pkg/scheduler/algorithm/priorities/resource_limits.go @@ -21,7 +21,7 @@ import ( "k8s.io/api/core/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/klog" ) @@ -33,7 +33,7 @@ import ( // of the pod are satisfied, the node is assigned a score of 1. // Rationale of choosing the lowest score of 1 is that this is mainly selected to break ties between nodes that have // same scores assigned by one of least and most requested priority functions. 
-func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") @@ -82,10 +82,10 @@ func computeScore(limit, allocatable int64) int64 { // getResourceLimits computes resource limits for input pod. // The reason to create this new function is to be consistent with other // priority functions because most or perhaps all priority functions work -// with schedulercache.Resource. +// with schedulernodeinfo.Resource. // TODO: cache it as part of metadata passed to priority functions. -func getResourceLimits(pod *v1.Pod) *schedulercache.Resource { - result := &schedulercache.Resource{} +func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource { + result := &schedulernodeinfo.Resource{} for _, container := range pod.Spec.Containers { result.Add(container.Resources.Limits) } diff --git a/pkg/scheduler/algorithm/priorities/resource_limits_test.go b/pkg/scheduler/algorithm/priorities/resource_limits_test.go index 86ecd8cca95..1e3782b014c 100644 --- a/pkg/scheduler/algorithm/priorities/resource_limits_test.go +++ b/pkg/scheduler/algorithm/priorities/resource_limits_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) func TestResourceLimistPriority(t *testing.T) { @@ -139,7 +139,7 @@ func TestResourceLimistPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + nodeNameToInfo := 
schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes) list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/scheduler/algorithm/priorities/selector_spreading.go b/pkg/scheduler/algorithm/priorities/selector_spreading.go index 1371d765a53..06cdc6edbbf 100644 --- a/pkg/scheduler/algorithm/priorities/selector_spreading.go +++ b/pkg/scheduler/algorithm/priorities/selector_spreading.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/klog" @@ -63,7 +63,7 @@ func NewSelectorSpreadPriority( // It favors nodes that have fewer existing matching pods. // i.e. it pushes the scheduler towards a node where there's the smallest number of // pods which match the same service, RC,RSs or StatefulSets selectors as the pod being scheduled. -func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { var selectors []labels.Selector node := nodeInfo.Node() if node == nil { @@ -114,7 +114,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{ // based on the number of existing matching pods on the node // where zone information is included on the nodes, it favors nodes // in zones with fewer existing matching pods. 
-func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { +func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error { countsByZone := make(map[string]int, 10) maxCountByZone := int(0) maxCountByNodeName := int(0) @@ -202,7 +202,7 @@ func (s *ServiceAntiAffinity) getNodeClassificationByLabels(nodes []*v1.Node) (m } // filteredPod get pods based on namespace and selector -func filteredPod(namespace string, selector labels.Selector, nodeInfo *schedulercache.NodeInfo) (pods []*v1.Pod) { +func filteredPod(namespace string, selector labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) (pods []*v1.Pod) { if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || selector == nil { return []*v1.Pod{} } @@ -218,7 +218,7 @@ func filteredPod(namespace string, selector labels.Selector, nodeInfo *scheduler // CalculateAntiAffinityPriorityMap spreads pods by minimizing the number of pods belonging to the same service // on given machine -func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { var firstServiceSelector labels.Selector node := nodeInfo.Node() @@ -242,7 +242,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta // CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label. // The label to be considered is provided to the struct (ServiceAntiAffinity). 
-func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { +func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error { var numServicePods int var label string podCounts := map[string]int{} diff --git a/pkg/scheduler/algorithm/priorities/selector_spreading_test.go b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go index 954f7900683..e76d879aef9 100644 --- a/pkg/scheduler/algorithm/priorities/selector_spreading_test.go +++ b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -339,7 +339,7 @@ func TestSelectorSpreadPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeNodeList(test.nodes)) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, makeNodeList(test.nodes)) selectorSpread := SelectorSpread{ serviceLister: schedulertesting.FakeServiceLister(test.services), controllerLister: schedulertesting.FakeControllerLister(test.rcs), @@ -575,7 +575,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(labeledNodes)) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(labeledNodes)) selectorSpread := 
SelectorSpread{ serviceLister: schedulertesting.FakeServiceLister(test.services), controllerLister: schedulertesting.FakeControllerLister(test.rcs), @@ -767,7 +767,7 @@ func TestZoneSpreadPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes)) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes)) zoneSpread := ServiceAntiAffinity{podLister: schedulertesting.FakePodLister(test.pods), serviceLister: schedulertesting.FakeServiceLister(test.services), label: "zone"} metaDataProducer := NewPriorityMetadataFactory( diff --git a/pkg/scheduler/algorithm/priorities/taint_toleration.go b/pkg/scheduler/algorithm/priorities/taint_toleration.go index 5790a4b091a..85be011cabe 100644 --- a/pkg/scheduler/algorithm/priorities/taint_toleration.go +++ b/pkg/scheduler/algorithm/priorities/taint_toleration.go @@ -22,7 +22,7 @@ import ( "k8s.io/api/core/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule @@ -52,7 +52,7 @@ func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationLi } // ComputeTaintTolerationPriorityMap prepares the priority list for all the nodes based on the number of intolerable taints on the node -func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, 
fmt.Errorf("node not found") diff --git a/pkg/scheduler/algorithm/priorities/taint_toleration_test.go b/pkg/scheduler/algorithm/priorities/taint_toleration_test.go index 70b69aed6de..e093692eb0b 100644 --- a/pkg/scheduler/algorithm/priorities/taint_toleration_test.go +++ b/pkg/scheduler/algorithm/priorities/taint_toleration_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node { @@ -227,7 +227,7 @@ func TestTaintAndToleration(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes) ttp := priorityFunction(ComputeTaintTolerationPriorityMap, ComputeTaintTolerationPriorityReduce, nil) list, err := ttp(test.pod, nodeNameToInfo, test.nodes) if err != nil { diff --git a/pkg/scheduler/algorithm/priorities/test_util.go b/pkg/scheduler/algorithm/priorities/test_util.go index da85c6b391b..d154fca7526 100644 --- a/pkg/scheduler/algorithm/priorities/test_util.go +++ b/pkg/scheduler/algorithm/priorities/test_util.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) func makeNode(node string, milliCPU, memory int64) *v1.Node { @@ -42,7 +42,7 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node { } func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction, metaData interface{}) algorithm.PriorityFunction { - return func(pod *v1.Pod, 
nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { + return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { result := make(schedulerapi.HostPriorityList, 0, len(nodes)) for i := range nodes { hostResult, err := mapFn(pod, metaData, nodeNameToInfo[nodes[i].Name]) diff --git a/pkg/scheduler/algorithm/scheduler_interface.go b/pkg/scheduler/algorithm/scheduler_interface.go index d74af089d35..81dedd42928 100644 --- a/pkg/scheduler/algorithm/scheduler_interface.go +++ b/pkg/scheduler/algorithm/scheduler_interface.go @@ -19,7 +19,7 @@ package algorithm import ( "k8s.io/api/core/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // SchedulerExtender is an interface for external processes to influence scheduling @@ -33,7 +33,7 @@ type SchedulerExtender interface { // expected to be a subset of the supplied list. failedNodesMap optionally contains // the list of failed nodes and failure reasons. Filter(pod *v1.Pod, - nodes []*v1.Node, nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, ) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error) // Prioritize based on extender-implemented priority functions. The returned scores & weight @@ -62,7 +62,7 @@ type SchedulerExtender interface { ProcessPreemption( pod *v1.Pod, nodeToVictims map[*v1.Node]*schedulerapi.Victims, - nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, ) (map[*v1.Node]*schedulerapi.Victims, error) // SupportsPreemption returns if the scheduler extender support preemption or not. @@ -72,20 +72,3 @@ type SchedulerExtender interface { // is unavailable. 
This gives scheduler ability to fail fast and tolerate non-critical extenders as well. IsIgnorable() bool } - -// ScheduleAlgorithm is an interface implemented by things that know how to schedule pods -// onto machines. -type ScheduleAlgorithm interface { - Schedule(*v1.Pod, NodeLister) (selectedMachine string, err error) - // Preempt receives scheduling errors for a pod and tries to create room for - // the pod by preempting lower priority pods if possible. - // It returns the node where preemption happened, a list of preempted pods, a - // list of pods whose nominated node name should be removed, and error if any. - Preempt(*v1.Pod, NodeLister, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error) - // Predicates() returns a pointer to a map of predicate functions. This is - // exposed for testing. - Predicates() map[string]FitPredicate - // Prioritizers returns a slice of priority config. This is exposed for - // testing. - Prioritizers() []PriorityConfig -} diff --git a/pkg/scheduler/algorithm/types.go b/pkg/scheduler/algorithm/types.go index 835a5a0bfb8..e4f479d1e61 100644 --- a/pkg/scheduler/algorithm/types.go +++ b/pkg/scheduler/algorithm/types.go @@ -22,8 +22,8 @@ import ( policyv1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/labels" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // NodeFieldSelectorKeys is a map that: the key are node field selector keys; the values are @@ -34,30 +34,30 @@ var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{ // FitPredicate is a function that indicates if a pod fits into an existing node. // The failure information is given by the error. 
-type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error) +type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) // PriorityMapFunction is a function that computes per-node results for a given node. // TODO: Figure out the exact API of this method. // TODO: Change interface{} to a specific type. -type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) +type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) // PriorityReduceFunction is a function that aggregated per-node results and computes // final scores for all nodes. // TODO: Figure out the exact API of this method. // TODO: Change interface{} to a specific type. -type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error +type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error // PredicateMetadataProducer is a function that computes predicate metadata for a given pod. -type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) PredicateMetadata +type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata // PriorityMetadataProducer is a function that computes metadata for a given pod. This // is now used for only for priority functions. For predicates please use PredicateMetadataProducer. 
-type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} +type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} // PriorityFunction is a function that computes scores for all nodes. // DEPRECATED // Use Map-Reduce pattern for priority functions. -type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) +type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) // PriorityConfig is a config used for a priority function. type PriorityConfig struct { @@ -71,12 +71,12 @@ type PriorityConfig struct { } // EmptyPredicateMetadataProducer returns a no-op MetadataProducer type. -func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) PredicateMetadata { +func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata { return nil } // EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type. -func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { +func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} { return nil } @@ -174,6 +174,6 @@ func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.Sta // PredicateMetadata interface represents anything that can access a predicate metadata. 
type PredicateMetadata interface { ShallowCopy() PredicateMetadata - AddPod(addedPod *v1.Pod, nodeInfo *schedulercache.NodeInfo) error + AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error RemovePod(deletedPod *v1.Pod) error } diff --git a/pkg/scheduler/algorithm/types_test.go b/pkg/scheduler/algorithm/types_test.go index e613c8c5028..9f117240d3f 100644 --- a/pkg/scheduler/algorithm/types_test.go +++ b/pkg/scheduler/algorithm/types_test.go @@ -21,7 +21,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // EmptyPriorityMetadataProducer should returns a no-op PriorityMetadataProducer type. @@ -29,9 +29,9 @@ func TestEmptyPriorityMetadataProducer(t *testing.T) { fakePod := new(v1.Pod) fakeLabelSelector := labels.SelectorFromSet(labels.Set{"foo": "bar"}) - nodeNameToInfo := map[string]*schedulercache.NodeInfo{ - "2": schedulercache.NewNodeInfo(fakePod), - "1": schedulercache.NewNodeInfo(), + nodeNameToInfo := map[string]*schedulernodeinfo.NodeInfo{ + "2": schedulernodeinfo.NewNodeInfo(fakePod), + "1": schedulernodeinfo.NewNodeInfo(), } // Test EmptyPriorityMetadataProducer metadata := EmptyPriorityMetadataProducer(fakePod, nodeNameToInfo) diff --git a/pkg/scheduler/algorithmprovider/BUILD b/pkg/scheduler/algorithmprovider/BUILD index 1e3a118c9f2..c06cbd716e4 100644 --- a/pkg/scheduler/algorithmprovider/BUILD +++ b/pkg/scheduler/algorithmprovider/BUILD @@ -15,10 +15,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "main_test.go", - "plugins_test.go", - ], + srcs = ["plugins_test.go"], embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", diff --git a/pkg/scheduler/algorithmprovider/main_test.go b/pkg/scheduler/algorithmprovider/main_test.go deleted file mode 100644 index 73e322ffdac..00000000000 --- a/pkg/scheduler/algorithmprovider/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ 
-/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package algorithmprovider - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/scheduler/api/validation/validation_test.go b/pkg/scheduler/api/validation/validation_test.go index 9c3a5cd355d..d81e90a3156 100644 --- a/pkg/scheduler/api/validation/validation_test.go +++ b/pkg/scheduler/api/validation/validation_test.go @@ -28,44 +28,55 @@ func TestValidatePolicy(t *testing.T) { tests := []struct { policy api.Policy expected error + name string }{ { + name: "no weight defined in policy", policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "NoWeightPriority"}}}, expected: errors.New("Priority NoWeightPriority should have a positive weight applied to it or it has overflown"), }, { + name: "policy weight is not positive", policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "NoWeightPriority", Weight: 0}}}, expected: errors.New("Priority NoWeightPriority should have a positive weight applied to it or it has overflown"), }, { + name: "valid weight priority", policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: 2}}}, expected: nil, }, { + name: "invalid negative weight policy", policy: 
api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: -2}}}, expected: errors.New("Priority WeightPriority should have a positive weight applied to it or it has overflown"), }, { + name: "policy weight exceeds maximum", policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: api.MaxWeight}}}, expected: errors.New("Priority WeightPriority should have a positive weight applied to it or it has overflown"), }, { + name: "valid weight in policy extender config", policy: api.Policy{ExtenderConfigs: []api.ExtenderConfig{{URLPrefix: "http://127.0.0.1:8081/extender", PrioritizeVerb: "prioritize", Weight: 2}}}, expected: nil, }, { + name: "invalid negative weight in policy extender config", policy: api.Policy{ExtenderConfigs: []api.ExtenderConfig{{URLPrefix: "http://127.0.0.1:8081/extender", PrioritizeVerb: "prioritize", Weight: -2}}}, expected: errors.New("Priority for extender http://127.0.0.1:8081/extender should have a positive weight applied to it"), }, { + name: "valid filter verb and url prefix", policy: api.Policy{ExtenderConfigs: []api.ExtenderConfig{{URLPrefix: "http://127.0.0.1:8081/extender", FilterVerb: "filter"}}}, expected: nil, }, { + name: "valid preemt verb and urlprefix", policy: api.Policy{ExtenderConfigs: []api.ExtenderConfig{{URLPrefix: "http://127.0.0.1:8081/extender", PreemptVerb: "preempt"}}}, expected: nil, }, { + name: "invalid multiple extenders", policy: api.Policy{ ExtenderConfigs: []api.ExtenderConfig{ {URLPrefix: "http://127.0.0.1:8081/extender", BindVerb: "bind"}, @@ -74,6 +85,7 @@ func TestValidatePolicy(t *testing.T) { expected: errors.New("Only one extender can implement bind, found 2"), }, { + name: "invalid duplicate extender resource name", policy: api.Policy{ ExtenderConfigs: []api.ExtenderConfig{ {URLPrefix: "http://127.0.0.1:8081/extender", ManagedResources: []api.ExtenderManagedResource{{Name: "foo.com/bar"}}}, @@ -82,6 +94,7 @@ func TestValidatePolicy(t *testing.T) { expected: 
errors.New("Duplicate extender managed resource name foo.com/bar"), }, { + name: "invalid extended resource name", policy: api.Policy{ ExtenderConfigs: []api.ExtenderConfig{ {URLPrefix: "http://127.0.0.1:8081/extender", ManagedResources: []api.ExtenderManagedResource{{Name: "kubernetes.io/foo"}}}, @@ -91,9 +104,11 @@ func TestValidatePolicy(t *testing.T) { } for _, test := range tests { - actual := ValidatePolicy(test.policy) - if fmt.Sprint(test.expected) != fmt.Sprint(actual) { - t.Errorf("expected: %s, actual: %s", test.expected, actual) - } + t.Run(test.name, func(t *testing.T) { + actual := ValidatePolicy(test.policy) + if fmt.Sprint(test.expected) != fmt.Sprint(actual) { + t.Errorf("expected: %s, actual: %s", test.expected, actual) + } + }) } } diff --git a/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD index 7df943d3f7e..fa8497b6e7d 100644 --- a/pkg/scheduler/core/BUILD +++ b/pkg/scheduler/core/BUILD @@ -12,11 +12,12 @@ go_library( "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/core/equivalence:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", "//pkg/scheduler/metrics:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/plugins/v1alpha1:go_default_library", "//pkg/scheduler/util:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -47,10 +48,11 @@ go_test( "//pkg/scheduler/algorithm/priorities:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/core/equivalence:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", 
"//pkg/scheduler/internal/queue:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/plugins/v1alpha1:go_default_library", "//pkg/scheduler/testing:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", diff --git a/pkg/scheduler/core/equivalence/BUILD b/pkg/scheduler/core/equivalence/BUILD index a6ad12ff724..7cdad3cb51a 100644 --- a/pkg/scheduler/core/equivalence/BUILD +++ b/pkg/scheduler/core/equivalence/BUILD @@ -9,8 +9,8 @@ go_library( "//pkg/features:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/metrics:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/util/hash:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -26,7 +26,7 @@ go_test( deps = [ "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/core/equivalence/eqivalence.go b/pkg/scheduler/core/equivalence/eqivalence.go index d776981999b..12f2eed72c5 100644 --- a/pkg/scheduler/core/equivalence/eqivalence.go +++ b/pkg/scheduler/core/equivalence/eqivalence.go @@ -30,8 +30,8 @@ import ( "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" "k8s.io/kubernetes/pkg/scheduler/metrics" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" 
hashutil "k8s.io/kubernetes/pkg/util/hash" ) @@ -283,7 +283,7 @@ func (n *NodeCache) RunPredicate( predicateID int, pod *v1.Pod, meta algorithm.PredicateMetadata, - nodeInfo *schedulercache.NodeInfo, + nodeInfo *schedulernodeinfo.NodeInfo, equivClass *Class, ) (bool, []algorithm.PredicateFailureReason, error) { if nodeInfo == nil || nodeInfo.Node() == nil { @@ -310,7 +310,7 @@ func (n *NodeCache) updateResult( fit bool, reasons []algorithm.PredicateFailureReason, equivalenceHash uint64, - nodeInfo *schedulercache.NodeInfo, + nodeInfo *schedulernodeinfo.NodeInfo, ) { if nodeInfo == nil || nodeInfo.Node() == nil { // This may happen during tests. diff --git a/pkg/scheduler/core/equivalence/eqivalence_test.go b/pkg/scheduler/core/equivalence/eqivalence_test.go index f0f70b27b54..1c5779991c7 100644 --- a/pkg/scheduler/core/equivalence/eqivalence_test.go +++ b/pkg/scheduler/core/equivalence/eqivalence_test.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // makeBasicPod returns a Pod object with many of the fields populated. 
@@ -162,7 +162,7 @@ type mockPredicate struct { callCount int } -func (p *mockPredicate) predicate(*v1.Pod, algorithm.PredicateMetadata, *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (p *mockPredicate) predicate(*v1.Pod, algorithm.PredicateMetadata, *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { p.callCount++ return p.fit, p.reasons, p.err } @@ -219,7 +219,7 @@ func TestRunPredicate(t *testing.T) { predicateID := 0 for _, test := range tests { t.Run(test.name, func(t *testing.T) { - node := schedulercache.NewNodeInfo() + node := schedulernodeinfo.NewNodeInfo() testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}} node.SetNode(testNode) pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1"}} @@ -323,7 +323,7 @@ func TestUpdateResult(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - node := schedulercache.NewNodeInfo() + node := schedulernodeinfo.NewNodeInfo() testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}} node.SetNode(testNode) @@ -469,7 +469,7 @@ func TestLookupResult(t *testing.T) { ecache := NewCache(predicatesOrdering) nodeCache, _ := ecache.GetNodeCache(testNode.Name) - node := schedulercache.NewNodeInfo() + node := schedulernodeinfo.NewNodeInfo() node.SetNode(testNode) // set cached item to equivalence cache nodeCache.updateResult( @@ -687,7 +687,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { ecache := NewCache(predicatesOrdering) for _, test := range tests { - node := schedulercache.NewNodeInfo() + node := schedulernodeinfo.NewNodeInfo() testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}} node.SetNode(testNode) @@ -765,7 +765,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { ecache := NewCache(predicatesOrdering) for _, test := range tests { - node := schedulercache.NewNodeInfo() + node := schedulernodeinfo.NewNodeInfo() testNode := &v1.Node{ObjectMeta: 
metav1.ObjectMeta{Name: test.nodeName}} node.SetNode(testNode) diff --git a/pkg/scheduler/core/extender.go b/pkg/scheduler/core/extender.go index 9053a2e4ea2..010d48daf2d 100644 --- a/pkg/scheduler/core/extender.go +++ b/pkg/scheduler/core/extender.go @@ -30,7 +30,7 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) const ( @@ -128,7 +128,7 @@ func (h *HTTPExtender) SupportsPreemption() bool { func (h *HTTPExtender) ProcessPreemption( pod *v1.Pod, nodeToVictims map[*v1.Node]*schedulerapi.Victims, - nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, ) (map[*v1.Node]*schedulerapi.Victims, error) { var ( result schedulerapi.ExtenderPreemptionResult @@ -172,7 +172,7 @@ func (h *HTTPExtender) ProcessPreemption( // such as UIDs and names, to object pointers. 
func (h *HTTPExtender) convertToNodeToVictims( nodeNameToMetaVictims map[string]*schedulerapi.MetaVictims, - nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, ) (map[*v1.Node]*schedulerapi.Victims, error) { nodeToVictims := map[*v1.Node]*schedulerapi.Victims{} for nodeName, metaVictims := range nodeNameToMetaVictims { @@ -198,8 +198,8 @@ func (h *HTTPExtender) convertToNodeToVictims( func (h *HTTPExtender) convertPodUIDToPod( metaPod *schedulerapi.MetaPod, nodeName string, - nodeNameToInfo map[string]*schedulercache.NodeInfo) (*v1.Pod, error) { - var nodeInfo *schedulercache.NodeInfo + nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) (*v1.Pod, error) { + var nodeInfo *schedulernodeinfo.NodeInfo if nodeInfo, ok := nodeNameToInfo[nodeName]; ok { for _, pod := range nodeInfo.Pods() { if string(pod.UID) == metaPod.UID { @@ -250,7 +250,7 @@ func convertToNodeNameToVictims( // the list of failed nodes and failure reasons. func (h *HTTPExtender) Filter( pod *v1.Pod, - nodes []*v1.Node, nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, ) ([]*v1.Node, schedulerapi.FailedNodesMap, error) { var ( result schedulerapi.ExtenderFilterResult diff --git a/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go index e71f8805d67..218e2f82502 100644 --- a/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -26,9 +26,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" 
"k8s.io/kubernetes/pkg/scheduler/util" ) @@ -95,7 +95,7 @@ func machine2PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.H return &result, nil } -func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { +func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { result := []schedulerapi.HostPriority{} for _, node := range nodes { score := 1 @@ -117,7 +117,7 @@ type FakeExtender struct { ignorable bool // Cached node information for fake extender - cachedNodeNameToInfo map[string]*schedulercache.NodeInfo + cachedNodeNameToInfo map[string]*schedulernodeinfo.NodeInfo } func (f *FakeExtender) Name() string { @@ -136,7 +136,7 @@ func (f *FakeExtender) SupportsPreemption() bool { func (f *FakeExtender) ProcessPreemption( pod *v1.Pod, nodeToVictims map[*v1.Node]*schedulerapi.Victims, - nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, ) (map[*v1.Node]*schedulerapi.Victims, error) { nodeToVictimsCopy := map[*v1.Node]*schedulerapi.Victims{} // We don't want to change the original nodeToVictims @@ -175,7 +175,7 @@ func (f *FakeExtender) ProcessPreemption( func (f *FakeExtender) selectVictimsOnNodeByExtender( pod *v1.Pod, node *v1.Node, - nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, ) ([]*v1.Pod, int, bool, error) { // If a extender support preemption but have no cached node info, let's run filter to make sure // default scheduler's decision still stand with given pod and node. 
@@ -264,7 +264,7 @@ func (f *FakeExtender) runPredicate(pod *v1.Pod, node *v1.Node) (bool, error) { return fits, nil } -func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[string]*schedulercache.NodeInfo) ([]*v1.Node, schedulerapi.FailedNodesMap, error) { +func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) ([]*v1.Node, schedulerapi.FailedNodesMap, error) { filtered := []*v1.Node{} failedNodesMap := schedulerapi.FailedNodesMap{} for _, node := range nodes { @@ -507,7 +507,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { for _, name := range test.nodes { cache.AddNode(createNode(name)) } - queue := internalqueue.NewSchedulingQueue() + queue := internalqueue.NewSchedulingQueue(nil) scheduler := NewGenericScheduler( cache, nil, @@ -516,6 +516,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyPriorityMetadataProducer, + emptyPluginSet, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index 5f163889c48..32cbe148554 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -39,11 +39,12 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" "k8s.io/kubernetes/pkg/scheduler/core/equivalence" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/metrics" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" + pluginsv1alpha1 "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/util" 
"k8s.io/kubernetes/pkg/scheduler/volumebinder" ) @@ -95,6 +96,24 @@ func (f *FitError) Error() string { return reasonMsg } +// ScheduleAlgorithm is an interface implemented by things that know how to schedule pods +// onto machines. +// TODO: Rename this type. +type ScheduleAlgorithm interface { + Schedule(*v1.Pod, algorithm.NodeLister) (selectedMachine string, err error) + // Preempt receives scheduling errors for a pod and tries to create room for + // the pod by preempting lower priority pods if possible. + // It returns the node where preemption happened, a list of preempted pods, a + // list of pods whose nominated node name should be removed, and error if any. + Preempt(*v1.Pod, algorithm.NodeLister, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error) + // Predicates() returns a pointer to a map of predicate functions. This is + // exposed for testing. + Predicates() map[string]algorithm.FitPredicate + // Prioritizers returns a slice of priority config. This is exposed for + // testing. 
+ Prioritizers() []algorithm.PriorityConfig +} + type genericScheduler struct { cache schedulerinternalcache.Cache equivalenceCache *equivalence.Cache @@ -103,10 +122,11 @@ type genericScheduler struct { priorityMetaProducer algorithm.PriorityMetadataProducer predicateMetaProducer algorithm.PredicateMetadataProducer prioritizers []algorithm.PriorityConfig + pluginSet pluginsv1alpha1.PluginSet extenders []algorithm.SchedulerExtender lastNodeIndex uint64 alwaysCheckAllPredicates bool - cachedNodeInfoMap map[string]*schedulercache.NodeInfo + cachedNodeInfoMap map[string]*schedulernodeinfo.NodeInfo volumeBinder *volumebinder.VolumeBinder pvcLister corelisters.PersistentVolumeClaimLister pdbLister algorithm.PDBLister @@ -114,7 +134,7 @@ type genericScheduler struct { percentageOfNodesToScore int32 } -// snapshot snapshots equivalane cache and node infos for all fit and priority +// snapshot snapshots equivalence cache and node infos for all fit and priority // functions. func (g *genericScheduler) snapshot() error { // IMPORTANT NOTE: We must snapshot equivalence cache before snapshotting @@ -123,7 +143,7 @@ func (g *genericScheduler) snapshot() error { // 1. snapshot cache // 2. event arrives, updating cache and invalidating predicates or whole node cache // 3. snapshot ecache - // 4. evaludate predicates + // 4. evaluate predicates // 5. stale result will be written to ecache if g.equivalenceCache != nil { g.equivalenceCache.Snapshot() @@ -289,7 +309,7 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister, candidateNode := pickOneNodeForPreemption(nodeToVictims) if candidateNode == nil { - return nil, nil, nil, err + return nil, nil, nil, nil } // Lower priority pods nominated to run on this node, may no longer fit on @@ -298,7 +318,7 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister, // lets scheduler find another place for them. 
nominatedPods := g.getLowerPriorityNominatedPods(pod, candidateNode.Name) if nodeInfo, ok := g.cachedNodeInfoMap[candidateNode.Name]; ok { - return nodeInfo.Node(), nodeToVictims[candidateNode].Pods, nominatedPods, err + return nodeInfo.Node(), nodeToVictims[candidateNode].Pods, nominatedPods, nil } return nil, nil, nil, fmt.Errorf( @@ -390,7 +410,7 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v if len(g.predicates) == 0 { filtered = nodes } else { - allNodes := int32(g.cache.NodeTree().NumNodes) + allNodes := int32(g.cache.NodeTree().NumNodes()) numNodesToFind := g.numFeasibleNodesToFind(allNodes) // Create filtered list with enough space to avoid growing it @@ -495,8 +515,8 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v // to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether // any pod was found, 2) augmented meta data, 3) augmented nodeInfo. func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata, - nodeInfo *schedulercache.NodeInfo, queue internalqueue.SchedulingQueue) (bool, algorithm.PredicateMetadata, - *schedulercache.NodeInfo) { + nodeInfo *schedulernodeinfo.NodeInfo, queue internalqueue.SchedulingQueue) (bool, algorithm.PredicateMetadata, + *schedulernodeinfo.NodeInfo) { if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil { // This may happen only in tests. 
return false, meta, nodeInfo @@ -534,7 +554,7 @@ func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata, func podFitsOnNode( pod *v1.Pod, meta algorithm.PredicateMetadata, - info *schedulercache.NodeInfo, + info *schedulernodeinfo.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate, nodeCache *equivalence.NodeCache, queue internalqueue.SchedulingQueue, @@ -620,7 +640,7 @@ func podFitsOnNode( // All scores are finally combined (added) to get the total weighted scores of all nodes func PrioritizeNodes( pod *v1.Pod, - nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, meta interface{}, priorityConfigs []algorithm.PriorityConfig, nodes []*v1.Node, @@ -655,24 +675,26 @@ func PrioritizeNodes( // DEPRECATED: we can remove this when all priorityConfigs implement the // Map-Reduce pattern. - workqueue.ParallelizeUntil(context.TODO(), 16, len(priorityConfigs), func(i int) { - priorityConfig := priorityConfigs[i] - if priorityConfig.Function == nil { + for i := range priorityConfigs { + if priorityConfigs[i].Function != nil { + wg.Add(1) + go func(index int) { + defer wg.Done() + var err error + results[index], err = priorityConfigs[index].Function(pod, nodeNameToInfo, nodes) + if err != nil { + appendError(err) + } + }(i) + } else { results[i] = make(schedulerapi.HostPriorityList, len(nodes)) - return } - - var err error - results[i], err = priorityConfig.Function(pod, nodeNameToInfo, nodes) - if err != nil { - appendError(err) - } - }) + } workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) { nodeInfo := nodeNameToInfo[nodes[index].Name] - for i, priorityConfig := range priorityConfigs { - if priorityConfig.Function != nil { + for i := range priorityConfigs { + if priorityConfigs[i].Function != nil { continue } @@ -685,22 +707,22 @@ func PrioritizeNodes( } }) - for i, priorityConfig := range priorityConfigs { - if priorityConfig.Reduce == nil { + for i := range 
priorityConfigs { + if priorityConfigs[i].Reduce == nil { continue } wg.Add(1) - go func(index int, config algorithm.PriorityConfig) { + go func(index int) { defer wg.Done() - if err := config.Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil { + if err := priorityConfigs[index].Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil { appendError(err) } if klog.V(10) { for _, hostPriority := range results[index] { - klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, config.Name, hostPriority.Score) + klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, priorityConfigs[index].Name, hostPriority.Score) } } - }(i, priorityConfig) + }(i) } // Wait for all computations to be finished. wg.Wait() @@ -720,14 +742,14 @@ func PrioritizeNodes( if len(extenders) != 0 && nodes != nil { combinedScores := make(map[string]int, len(nodeNameToInfo)) - for _, extender := range extenders { - if !extender.IsInterested(pod) { + for i := range extenders { + if !extenders[i].IsInterested(pod) { continue } wg.Add(1) - go func(ext algorithm.SchedulerExtender) { + go func(extIndex int) { defer wg.Done() - prioritizedList, weight, err := ext.Prioritize(pod, nodes) + prioritizedList, weight, err := extenders[extIndex].Prioritize(pod, nodes) if err != nil { // Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities return @@ -736,12 +758,12 @@ func PrioritizeNodes( for i := range *prioritizedList { host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score if klog.V(10) { - klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, ext.Name(), score) + klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, extenders[extIndex].Name(), score) } combinedScores[host] += score * weight } mu.Unlock() - }(extender) + }(i) } // wait for all go routines to finish wg.Wait() @@ -759,7 +781,7 @@ func PrioritizeNodes( } // 
EqualPriorityMap is a prioritizer function that gives an equal weight of one to all nodes -func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") @@ -888,7 +910,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims) // selectNodesForPreemption finds all the nodes with possible victims for // preemption in parallel. func selectNodesForPreemption(pod *v1.Pod, - nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, potentialNodes []*v1.Node, predicates map[string]algorithm.FitPredicate, metadataProducer algorithm.PredicateMetadataProducer, @@ -978,7 +1000,7 @@ func filterPodsWithPDBViolation(pods []interface{}, pdbs []*policy.PodDisruption func selectVictimsOnNode( pod *v1.Pod, meta algorithm.PredicateMetadata, - nodeInfo *schedulercache.NodeInfo, + nodeInfo *schedulernodeinfo.NodeInfo, fitPredicates map[string]algorithm.FitPredicate, queue internalqueue.SchedulingQueue, pdbs []*policy.PodDisruptionBudget, @@ -1102,7 +1124,7 @@ func nodesWherePreemptionMightHelp(nodes []*v1.Node, failedPredicatesMap FailedP // considered for preemption. // We look at the node that is nominated for this pod and as long as there are // terminating pods on the node, we don't consider this for preempting more pods. 
-func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) bool { +func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) bool { nomNodeName := pod.Status.NominatedNodeName if len(nomNodeName) > 0 { if nodeInfo, found := nodeNameToInfo[nomNodeName]; found { @@ -1152,6 +1174,7 @@ func NewGenericScheduler( predicateMetaProducer algorithm.PredicateMetadataProducer, prioritizers []algorithm.PriorityConfig, priorityMetaProducer algorithm.PriorityMetadataProducer, + pluginSet pluginsv1alpha1.PluginSet, extenders []algorithm.SchedulerExtender, volumeBinder *volumebinder.VolumeBinder, pvcLister corelisters.PersistentVolumeClaimLister, @@ -1159,7 +1182,7 @@ func NewGenericScheduler( alwaysCheckAllPredicates bool, disablePreemption bool, percentageOfNodesToScore int32, -) algorithm.ScheduleAlgorithm { +) ScheduleAlgorithm { return &genericScheduler{ cache: cache, equivalenceCache: eCache, @@ -1168,8 +1191,9 @@ func NewGenericScheduler( predicateMetaProducer: predicateMetaProducer, prioritizers: prioritizers, priorityMetaProducer: priorityMetaProducer, + pluginSet: pluginSet, extenders: extenders, - cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo), + cachedNodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo), volumeBinder: volumeBinder, pvcLister: pvcLister, pdbLister: pdbLister, diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go index d0152a9fbb6..bf6b8dfbbcf 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -39,10 +39,11 @@ import ( algorithmpriorities "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" "k8s.io/kubernetes/pkg/scheduler/core/equivalence" schedulerinternalcache 
"k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" + plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -51,15 +52,15 @@ var ( order = []string{"false", "true", "matches", "nopods", algorithmpredicates.MatchInterPodAffinityPred} ) -func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil } -func truePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func truePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return true, nil, nil } -func matchesPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func matchesPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -70,14 +71,14 @@ func matchesPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil } -func hasNoPodsPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func hasNoPodsPredicate(pod *v1.Pod, meta 
algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { if len(nodeInfo.Pods()) == 0 { return true, nil, nil } return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil } -func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { +func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { result := []schedulerapi.HostPriority{} for _, node := range nodes { score, err := strconv.Atoi(node.Name) @@ -92,7 +93,7 @@ func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.Node return result, nil } -func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { +func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { var maxScore float64 minScore := math.MaxFloat64 reverseResult := []schedulerapi.HostPriority{} @@ -115,18 +116,18 @@ func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercac return reverseResult, nil } -func trueMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func trueMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { return schedulerapi.HostPriority{ Host: nodeInfo.Node().Name, Score: 1, }, nil } -func falseMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func falseMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) { return schedulerapi.HostPriority{}, errPrioritize } -func getNodeReducePriority(pod 
*v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { +func getNodeReducePriority(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error { for _, host := range result { if host.Host == "" { return fmt.Errorf("unexpected empty host name") @@ -135,6 +136,28 @@ func getNodeReducePriority(pod *v1.Pod, meta interface{}, nodeNameToInfo map[str return nil } +// EmptyPluginSet is a test plugin set used by the default scheduler. +type EmptyPluginSet struct{} + +var _ plugins.PluginSet = EmptyPluginSet{} + +// ReservePlugins returns a slice of default reserve plugins. +func (r EmptyPluginSet) ReservePlugins() []plugins.ReservePlugin { + return []plugins.ReservePlugin{} +} + +// PrebindPlugins returns a slice of default prebind plugins. +func (r EmptyPluginSet) PrebindPlugins() []plugins.PrebindPlugin { + return []plugins.PrebindPlugin{} +} + +// Data returns a pointer to PluginData. 
+func (r EmptyPluginSet) Data() *plugins.PluginData { + return &plugins.PluginData{} +} + +var emptyPluginSet = &EmptyPluginSet{} + func makeNodeList(nodeNames []string) []*v1.Node { result := make([]*v1.Node, 0, len(nodeNames)) for _, nodeName := range nodeNames { @@ -449,11 +472,12 @@ func TestGenericScheduler(t *testing.T) { scheduler := NewGenericScheduler( cache, nil, - internalqueue.NewSchedulingQueue(), + internalqueue.NewSchedulingQueue(nil), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyPriorityMetadataProducer, + emptyPluginSet, []algorithm.SchedulerExtender{}, nil, pvcLister, @@ -485,11 +509,12 @@ func makeScheduler(predicates map[string]algorithm.FitPredicate, nodes []*v1.Nod s := NewGenericScheduler( cache, nil, - internalqueue.NewSchedulingQueue(), + internalqueue.NewSchedulingQueue(nil), predicates, algorithm.EmptyPredicateMetadataProducer, prioritizers, algorithm.EmptyPriorityMetadataProducer, + emptyPluginSet, nil, nil, nil, nil, false, false, schedulerapi.DefaultPercentageOfNodesToScore) cache.UpdateNodeNameToInfoMap(s.(*genericScheduler).cachedNodeInfoMap) @@ -704,7 +729,7 @@ func TestZeroRequest(t *testing.T) { pc := algorithm.PriorityConfig{Map: selectorSpreadPriorityMap, Reduce: selectorSpreadPriorityReduce, Weight: 1} priorityConfigs = append(priorityConfigs, pc) - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes) metaDataProducer := algorithmpriorities.NewPriorityMetadataFactory( schedulertesting.FakeServiceLister([]*v1.Service{}), @@ -775,7 +800,7 @@ func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) { return &node, nil } -func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulercache.NodeInfo) algorithm.PredicateMetadata { +func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata { return 
algorithmpredicates.NewPredicateMetadataFactory(schedulertesting.FakePodLister{p})(p, nodeInfo) } @@ -959,7 +984,7 @@ func TestSelectNodesForPreemption(t *testing.T) { if test.addAffinityPredicate { test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods)) } - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, nodes) // newnode simulate a case that a new node is added to the cluster, but nodeNameToInfo // doesn't have it yet. newnode := makeNode("newnode", 1000*5, priorityutil.DefaultMemoryRequest*5) @@ -1124,7 +1149,7 @@ func TestPickOneNodeForPreemption(t *testing.T) { for _, n := range test.nodes { nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5)) } - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes) + nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, nodes) candidateNodes, _ := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil) node := pickOneNodeForPreemption(candidateNodes) found := false @@ -1392,13 +1417,13 @@ func TestPreempt(t *testing.T) { for _, pod := range test.pods { cache.AddPod(pod) } - cachedNodeInfoMap := map[string]*schedulercache.NodeInfo{} + cachedNodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{} for _, name := range nodeNames { node := makeNode(name, 1000*5, priorityutil.DefaultMemoryRequest*5) cache.AddNode(node) // Set nodeInfo to extenders to mock extenders' cache for preemption. 
- cachedNodeInfo := schedulercache.NewNodeInfo() + cachedNodeInfo := schedulernodeinfo.NewNodeInfo() cachedNodeInfo.SetNode(node) cachedNodeInfoMap[name] = cachedNodeInfo } @@ -1411,11 +1436,12 @@ func TestPreempt(t *testing.T) { scheduler := NewGenericScheduler( cache, nil, - internalqueue.NewSchedulingQueue(), + internalqueue.NewSchedulingQueue(nil), map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyPriorityMetadataProducer, + emptyPluginSet, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, @@ -1476,7 +1502,7 @@ type syncingMockCache struct { // // Since UpdateNodeNameToInfoMap is one of the first steps of (*genericScheduler).Schedule, we use // this point to signal to the test that a scheduling cycle has started. -func (c *syncingMockCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error { +func (c *syncingMockCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulernodeinfo.NodeInfo) error { err := c.Cache.UpdateNodeNameToInfoMap(infoMap) c.once.Do(func() { c.cycleStart <- struct{}{} @@ -1487,14 +1513,14 @@ func (c *syncingMockCache) UpdateNodeNameToInfoMap(infoMap map[string]*scheduler // TestCacheInvalidationRace tests that equivalence cache invalidation is correctly // handled when an invalidation event happens early in a scheduling cycle. Specifically, the event -// occurs after schedulercache is snapshotted and before equivalence cache lock is acquired. +// occurs after schedulernodeinfo is snapshotted and before equivalence cache lock is acquired. func TestCacheInvalidationRace(t *testing.T) { // Create a predicate that returns false the first time and true on subsequent calls. 
podWillFit := false var callCount int testPredicate := func(pod *v1.Pod, meta algorithm.PredicateMetadata, - nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { + nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { callCount++ if !podWillFit { podWillFit = true @@ -1538,11 +1564,12 @@ func TestCacheInvalidationRace(t *testing.T) { scheduler := NewGenericScheduler( mockCache, eCache, - internalqueue.NewSchedulingQueue(), + internalqueue.NewSchedulingQueue(nil), ps, algorithm.EmptyPredicateMetadataProducer, prioritizers, algorithm.EmptyPriorityMetadataProducer, + emptyPluginSet, nil, nil, pvcLister, pdbLister, true, false, schedulerapi.DefaultPercentageOfNodesToScore) @@ -1578,7 +1605,7 @@ func TestCacheInvalidationRace2(t *testing.T) { ) testPredicate := func(pod *v1.Pod, meta algorithm.PredicateMetadata, - nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { + nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { callCount++ once.Do(func() { cycleStart <- struct{}{} @@ -1621,11 +1648,12 @@ func TestCacheInvalidationRace2(t *testing.T) { scheduler := NewGenericScheduler( cache, eCache, - internalqueue.NewSchedulingQueue(), + internalqueue.NewSchedulingQueue(nil), ps, algorithm.EmptyPredicateMetadataProducer, prioritizers, algorithm.EmptyPriorityMetadataProducer, + emptyPluginSet, nil, nil, pvcLister, pdbLister, true, false, schedulerapi.DefaultPercentageOfNodesToScore) diff --git a/pkg/scheduler/factory/BUILD b/pkg/scheduler/factory/BUILD index 7be893b0606..2c56de6062c 100644 --- a/pkg/scheduler/factory/BUILD +++ b/pkg/scheduler/factory/BUILD @@ -25,6 +25,8 @@ go_library( "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/cache/debugger:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", + "//pkg/scheduler/plugins:go_default_library", + 
"//pkg/scheduler/plugins/v1alpha1:go_default_library", "//pkg/scheduler/util:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -67,12 +69,12 @@ go_test( "//pkg/scheduler/algorithm/priorities:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/api/latest:go_default_library", - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/internal/cache/fake:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", - "//pkg/scheduler/testing:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index aa420bcb173..0338a74ce7c 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -63,6 +63,8 @@ import ( schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" cachedebugger "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" + "k8s.io/kubernetes/pkg/scheduler/plugins" + pluginsv1alpha1 "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) @@ -101,7 +103,7 @@ type Config struct { // successfully binding a pod Ecache *equivalence.Cache NodeLister algorithm.NodeLister - Algorithm algorithm.ScheduleAlgorithm + Algorithm core.ScheduleAlgorithm GetBinder func(pod *v1.Pod) Binder // PodConditionUpdater is used only in case of scheduling errors. 
If we succeed // with scheduling, PodScheduled condition will be updated in apiserver in /bind @@ -109,6 +111,8 @@ type Config struct { PodConditionUpdater PodConditionUpdater // PodPreemptor is used to evict pods and update pod annotations. PodPreemptor PodPreemptor + // PlugingSet has a set of plugins and data used to run them. + PluginSet pluginsv1alpha1.PluginSet // NextPod should be a function that blocks until the next pod // is available. We don't use a channel for this, because scheduling @@ -202,6 +206,8 @@ type configFactory struct { pdbLister policylisters.PodDisruptionBudgetLister // a means to list all StorageClasses storageClassLister storagelisters.StorageClassLister + // pluginRunner has a set of plugins and the context used for running them. + pluginSet pluginsv1alpha1.PluginSet // Close this to stop all reflectors StopEverything <-chan struct{} @@ -277,7 +283,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) Configurator { c := &configFactory{ client: args.Client, podLister: schedulerCache, - podQueue: internalqueue.NewSchedulingQueue(), + podQueue: internalqueue.NewSchedulingQueue(stopEverything), nodeLister: args.NodeInformer.Lister(), pVLister: args.PvInformer.Lister(), pVCLister: args.PvcInformer.Lister(), @@ -992,7 +998,14 @@ func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) { } c.invalidateCachedPredicatesOnNodeUpdate(newNode, oldNode) - c.podQueue.MoveAllToActiveQueue() + // Only activate unschedulable pods if the node became more schedulable. + // We skip the node property comparison when there is no unschedulable pods in the queue + // to save processing cycles. We still trigger a move to active queue to cover the case + // that a pod being processed by the scheduler is determined unschedulable. We want this + // pod to be reevaluated when a change in the cluster happens. 
+ if c.podQueue.NumUnschedulablePods() == 0 || nodeSchedulingPropertiesChanged(newNode, oldNode) { + c.podQueue.MoveAllToActiveQueue() + } } func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node, oldNode *v1.Node) { @@ -1064,6 +1077,53 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node, } } +func nodeSchedulingPropertiesChanged(newNode *v1.Node, oldNode *v1.Node) bool { + if nodeSpecUnschedulableChanged(newNode, oldNode) { + return true + } + if nodeAllocatableChanged(newNode, oldNode) { + return true + } + if nodeLabelsChanged(newNode, oldNode) { + return true + } + if nodeTaintsChanged(newNode, oldNode) { + return true + } + if nodeConditionsChanged(newNode, oldNode) { + return true + } + + return false +} + +func nodeAllocatableChanged(newNode *v1.Node, oldNode *v1.Node) bool { + return !reflect.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable) +} + +func nodeLabelsChanged(newNode *v1.Node, oldNode *v1.Node) bool { + return !reflect.DeepEqual(oldNode.GetLabels(), newNode.GetLabels()) +} + +func nodeTaintsChanged(newNode *v1.Node, oldNode *v1.Node) bool { + return !reflect.DeepEqual(newNode.Spec.Taints, oldNode.Spec.Taints) +} + +func nodeConditionsChanged(newNode *v1.Node, oldNode *v1.Node) bool { + strip := func(conditions []v1.NodeCondition) map[v1.NodeConditionType]v1.ConditionStatus { + conditionStatuses := make(map[v1.NodeConditionType]v1.ConditionStatus, len(conditions)) + for i := range conditions { + conditionStatuses[conditions[i].Type] = conditions[i].Status + } + return conditionStatuses + } + return !reflect.DeepEqual(strip(oldNode.Status.Conditions), strip(newNode.Status.Conditions)) +} + +func nodeSpecUnschedulableChanged(newNode *v1.Node, oldNode *v1.Node) bool { + return newNode.Spec.Unschedulable != oldNode.Spec.Unschedulable && newNode.Spec.Unschedulable == false +} + func (c *configFactory) deleteNodeFromCache(obj interface{}) { var node *v1.Node switch t := obj.(type) { 
@@ -1225,6 +1285,9 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, return nil, err } + // TODO(bsalamat): the default registrar should be able to process config files. + c.pluginSet = plugins.NewDefaultPluginSet(pluginsv1alpha1.NewPluginContext(), &c.schedulerCache) + // Init equivalence class cache if c.enableEquivalenceClassCache { c.equivalencePodCache = equivalence.NewCache(predicates.Ordering()) @@ -1239,6 +1302,7 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, predicateMetaProducer, priorityConfigs, priorityMetaProducer, + c.pluginSet, extenders, c.volumeBinder, c.pVCLister, @@ -1258,6 +1322,7 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, GetBinder: c.getBinderFunc(extenders), PodConditionUpdater: &podConditionUpdater{c.client}, PodPreemptor: &podPreemptor{c.client}, + PluginSet: c.pluginSet, WaitForCacheSync: func() bool { return cache.WaitForCacheSync(c.StopEverything, c.scheduledPodsHasSynced) }, @@ -1482,8 +1547,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue // to run on a node, scheduler takes the pod into account when running // predicates for the node. 
if !util.PodPriorityEnabled() { - entry := backoff.GetEntry(podID) - if !entry.TryWait(backoff.MaxDuration()) { + if !backoff.TryBackoffAndWait(podID, c.StopEverything) { klog.Warningf("Request for pod %v already in flight, abandoning", podID) return } diff --git a/pkg/scheduler/factory/factory_test.go b/pkg/scheduler/factory/factory_test.go index aa68c716fc0..7f45516e7aa 100644 --- a/pkg/scheduler/factory/factory_test.go +++ b/pkg/scheduler/factory/factory_test.go @@ -24,6 +24,7 @@ import ( "time" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -37,10 +38,9 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" - schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -230,19 +230,19 @@ func TestCreateFromConfigWithEmptyPredicatesOrPriorities(t *testing.T) { } } -func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return true, nil, nil } -func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return true, nil, nil } -func 
PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { +func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { return []schedulerapi.HostPriority{}, nil } -func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { +func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { return []schedulerapi.HostPriority{}, nil } @@ -366,18 +366,15 @@ func testBind(binding *v1.Binding, t *testing.T) { pod := client.CoreV1().Pods(metav1.NamespaceDefault).(*fakeV1.FakePods) - bind, err := pod.GetBinding(binding.GetName()) + actualBinding, err := pod.GetBinding(binding.GetName()) if err != nil { t.Fatalf("Unexpected error: %v", err) return } - - expectedBody := runtime.EncodeOrDie(schedulertesting.Test.Codec(), binding) - bind.APIVersion = "" - bind.Kind = "" - body := runtime.EncodeOrDie(schedulertesting.Test.Codec(), bind) - if expectedBody != body { - t.Errorf("Expected body %s, Got %s", expectedBody, body) + if !reflect.DeepEqual(binding, actualBinding) { + t.Errorf("Binding did not match expectation") + t.Logf("Expected: %v", binding) + t.Logf("Actual: %v", actualBinding) } } @@ -560,7 +557,7 @@ func (f *fakeExtender) IsIgnorable() bool { func (f *fakeExtender) ProcessPreemption( pod *v1.Pod, nodeToVictims map[*v1.Node]*schedulerapi.Victims, - nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, ) (map[*v1.Node]*schedulerapi.Victims, error) { return nil, nil } @@ -572,7 +569,7 @@ func (f *fakeExtender) SupportsPreemption() bool { func (f *fakeExtender) Filter( pod *v1.Pod, nodes []*v1.Node, - nodeNameToInfo map[string]*schedulercache.NodeInfo, + nodeNameToInfo 
map[string]*schedulernodeinfo.NodeInfo, ) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error) { return nil, nil, nil } @@ -657,3 +654,146 @@ func testGetBinderFunc(expectedBinderType, podName string, extenders []algorithm t.Errorf("Expected binder %q but got %q", expectedBinderType, binderType) } } + +func TestNodeAllocatableChanged(t *testing.T) { + newQuantity := func(value int64) resource.Quantity { + return *resource.NewQuantity(value, resource.BinarySI) + } + for _, c := range []struct { + Name string + Changed bool + OldAllocatable v1.ResourceList + NewAllocatable v1.ResourceList + }{ + { + Name: "no allocatable resources changed", + Changed: false, + OldAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)}, + NewAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)}, + }, + { + Name: "new node has more allocatable resources", + Changed: true, + OldAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)}, + NewAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024), v1.ResourceStorage: newQuantity(1024)}, + }, + } { + oldNode := &v1.Node{Status: v1.NodeStatus{Allocatable: c.OldAllocatable}} + newNode := &v1.Node{Status: v1.NodeStatus{Allocatable: c.NewAllocatable}} + changed := nodeAllocatableChanged(newNode, oldNode) + if changed != c.Changed { + t.Errorf("nodeAllocatableChanged should be %t, got %t", c.Changed, changed) + } + } +} + +func TestNodeLabelsChanged(t *testing.T) { + for _, c := range []struct { + Name string + Changed bool + OldLabels map[string]string + NewLabels map[string]string + }{ + { + Name: "no labels changed", + Changed: false, + OldLabels: map[string]string{"foo": "bar"}, + NewLabels: map[string]string{"foo": "bar"}, + }, + // Labels changed. 
+ { + Name: "new node has more labels", + Changed: true, + OldLabels: map[string]string{"foo": "bar"}, + NewLabels: map[string]string{"foo": "bar", "test": "value"}, + }, + } { + oldNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: c.OldLabels}} + newNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: c.NewLabels}} + changed := nodeLabelsChanged(newNode, oldNode) + if changed != c.Changed { + t.Errorf("Test case %q failed: should be %t, got %t", c.Name, c.Changed, changed) + } + } +} + +func TestNodeTaintsChanged(t *testing.T) { + for _, c := range []struct { + Name string + Changed bool + OldTaints []v1.Taint + NewTaints []v1.Taint + }{ + { + Name: "no taint changed", + Changed: false, + OldTaints: []v1.Taint{{Key: "key", Value: "value"}}, + NewTaints: []v1.Taint{{Key: "key", Value: "value"}}, + }, + { + Name: "taint value changed", + Changed: true, + OldTaints: []v1.Taint{{Key: "key", Value: "value1"}}, + NewTaints: []v1.Taint{{Key: "key", Value: "value2"}}, + }, + } { + oldNode := &v1.Node{Spec: v1.NodeSpec{Taints: c.OldTaints}} + newNode := &v1.Node{Spec: v1.NodeSpec{Taints: c.NewTaints}} + changed := nodeTaintsChanged(newNode, oldNode) + if changed != c.Changed { + t.Errorf("Test case %q failed: should be %t, not %t", c.Name, c.Changed, changed) + } + } +} + +func TestNodeConditionsChanged(t *testing.T) { + nodeConditionType := reflect.TypeOf(v1.NodeCondition{}) + if nodeConditionType.NumField() != 6 { + t.Errorf("NodeCondition type has changed. 
The nodeConditionsChanged() function must be reevaluated.") + } + + for _, c := range []struct { + Name string + Changed bool + OldConditions []v1.NodeCondition + NewConditions []v1.NodeCondition + }{ + { + Name: "no condition changed", + Changed: false, + OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}, + NewConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}, + }, + { + Name: "only LastHeartbeatTime changed", + Changed: false, + OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(1, 0)}}, + NewConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(2, 0)}}, + }, + { + Name: "new node has more healthy conditions", + Changed: true, + OldConditions: []v1.NodeCondition{}, + NewConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}, + }, + { + Name: "new node has less unhealthy conditions", + Changed: true, + OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}, + NewConditions: []v1.NodeCondition{}, + }, + { + Name: "condition status changed", + Changed: true, + OldConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}, + NewConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}, + }, + } { + oldNode := &v1.Node{Status: v1.NodeStatus{Conditions: c.OldConditions}} + newNode := &v1.Node{Status: v1.NodeStatus{Conditions: c.NewConditions}} + changed := nodeConditionsChanged(newNode, oldNode) + if changed != c.Changed { + t.Errorf("Test case %q failed: should be %t, got %t", c.Name, c.Changed, changed) + } + } +} diff --git a/pkg/scheduler/internal/cache/BUILD b/pkg/scheduler/internal/cache/BUILD index 61202c8c286..649e1688351 100644 --- a/pkg/scheduler/internal/cache/BUILD +++ b/pkg/scheduler/internal/cache/BUILD @@ -11,7 +11,7 @@ go_library( visibility = ["//visibility:public"], 
deps = [ "//pkg/features:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -26,7 +26,6 @@ go_test( name = "go_default_test", srcs = [ "cache_test.go", - "main_test.go", "node_tree_test.go", ], embed = [":go_default_library"], @@ -34,7 +33,7 @@ go_test( "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/internal/cache/cache.go b/pkg/scheduler/internal/cache/cache.go index 535236e5c1f..734babf652c 100644 --- a/pkg/scheduler/internal/cache/cache.go +++ b/pkg/scheduler/internal/cache/cache.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/klog" ) @@ -58,7 +58,7 @@ type schedulerCache struct { assumedPods map[string]bool // a map from pod key to podState. podStates map[string]*podState - nodes map[string]*schedulercache.NodeInfo + nodes map[string]*schedulernodeinfo.NodeInfo nodeTree *NodeTree // A map from image name to its imageState. imageStates map[string]*imageState @@ -80,8 +80,8 @@ type imageState struct { } // createImageStateSummary returns a summarizing snapshot of the given image's state. 
-func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulercache.ImageStateSummary { - return &schedulercache.ImageStateSummary{ +func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulernodeinfo.ImageStateSummary { + return &schedulernodeinfo.ImageStateSummary{ Size: state.size, NumNodes: len(state.nodes), } @@ -93,7 +93,7 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul period: period, stop: stop, - nodes: make(map[string]*schedulercache.NodeInfo), + nodes: make(map[string]*schedulernodeinfo.NodeInfo), nodeTree: newNodeTree(nil), assumedPods: make(map[string]bool), podStates: make(map[string]*podState), @@ -107,7 +107,7 @@ func (cache *schedulerCache) Snapshot() *Snapshot { cache.mu.RLock() defer cache.mu.RUnlock() - nodes := make(map[string]*schedulercache.NodeInfo) + nodes := make(map[string]*schedulernodeinfo.NodeInfo) for k, v := range cache.nodes { nodes[k] = v.Clone() } @@ -123,7 +123,7 @@ func (cache *schedulerCache) Snapshot() *Snapshot { } } -func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*schedulercache.NodeInfo) error { +func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) error { cache.mu.Lock() defer cache.mu.Unlock() @@ -171,7 +171,7 @@ func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.S } func (cache *schedulerCache) AssumePod(pod *v1.Pod) error { - key, err := schedulercache.GetPodKey(pod) + key, err := schedulernodeinfo.GetPodKey(pod) if err != nil { return err } @@ -197,7 +197,7 @@ func (cache *schedulerCache) FinishBinding(pod *v1.Pod) error { // finishBinding exists to make tests determinitistic by injecting now as an argument func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { - key, err := schedulercache.GetPodKey(pod) + key, err := schedulernodeinfo.GetPodKey(pod) if err != nil { return err } @@ -216,7 +216,7 @@ func 
(cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { } func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { - key, err := schedulercache.GetPodKey(pod) + key, err := schedulernodeinfo.GetPodKey(pod) if err != nil { return err } @@ -248,7 +248,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { func (cache *schedulerCache) addPod(pod *v1.Pod) { n, ok := cache.nodes[pod.Spec.NodeName] if !ok { - n = schedulercache.NewNodeInfo() + n = schedulernodeinfo.NewNodeInfo() cache.nodes[pod.Spec.NodeName] = n } n.AddPod(pod) @@ -276,7 +276,7 @@ func (cache *schedulerCache) removePod(pod *v1.Pod) error { } func (cache *schedulerCache) AddPod(pod *v1.Pod) error { - key, err := schedulercache.GetPodKey(pod) + key, err := schedulernodeinfo.GetPodKey(pod) if err != nil { return err } @@ -311,7 +311,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error { } func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { - key, err := schedulercache.GetPodKey(oldPod) + key, err := schedulernodeinfo.GetPodKey(oldPod) if err != nil { return err } @@ -339,7 +339,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { } func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { - key, err := schedulercache.GetPodKey(pod) + key, err := schedulernodeinfo.GetPodKey(pod) if err != nil { return err } @@ -368,7 +368,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { } func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) { - key, err := schedulercache.GetPodKey(pod) + key, err := schedulernodeinfo.GetPodKey(pod) if err != nil { return false, err } @@ -384,7 +384,7 @@ func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) { } func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) { - key, err := schedulercache.GetPodKey(pod) + key, err := schedulernodeinfo.GetPodKey(pod) if err != nil { return nil, err } @@ -406,7 +406,7 @@ func (cache *schedulerCache) AddNode(node 
*v1.Node) error { n, ok := cache.nodes[node.Name] if !ok { - n = schedulercache.NewNodeInfo() + n = schedulernodeinfo.NewNodeInfo() cache.nodes[node.Name] = n } else { cache.removeNodeImageStates(n.Node()) @@ -423,7 +423,7 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error { n, ok := cache.nodes[newNode.Name] if !ok { - n = schedulercache.NewNodeInfo() + n = schedulernodeinfo.NewNodeInfo() cache.nodes[newNode.Name] = n } else { cache.removeNodeImageStates(n.Node()) @@ -457,8 +457,8 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error { // addNodeImageStates adds states of the images on given node to the given nodeInfo and update the imageStates in // scheduler cache. This function assumes the lock to scheduler cache has been acquired. -func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulercache.NodeInfo) { - newSum := make(map[string]*schedulercache.ImageStateSummary) +func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulernodeinfo.NodeInfo) { + newSum := make(map[string]*schedulernodeinfo.ImageStateSummary) for _, image := range node.Status.Images { for _, name := range image.Names { diff --git a/pkg/scheduler/internal/cache/cache_test.go b/pkg/scheduler/internal/cache/cache_test.go index 97301725151..e7f15291c65 100644 --- a/pkg/scheduler/internal/cache/cache_test.go +++ b/pkg/scheduler/internal/cache/cache_test.go @@ -32,10 +32,10 @@ import ( utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/kubernetes/pkg/features" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) -func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *schedulercache.NodeInfo) { +func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *schedulernodeinfo.NodeInfo) { // Ignore generation field. 
if actual != nil { actual.SetGeneration(0) @@ -66,21 +66,21 @@ func (b *hostPortInfoBuilder) add(protocol, ip string, port int32) *hostPortInfo return b } -func (b *hostPortInfoBuilder) build() schedulercache.HostPortInfo { - res := make(schedulercache.HostPortInfo) +func (b *hostPortInfoBuilder) build() schedulernodeinfo.HostPortInfo { + res := make(schedulernodeinfo.HostPortInfo) for _, param := range b.inputs { res.Add(param.ip, param.protocol, param.port) } return res } -func newNodeInfo(requestedResource *schedulercache.Resource, - nonzeroRequest *schedulercache.Resource, +func newNodeInfo(requestedResource *schedulernodeinfo.Resource, + nonzeroRequest *schedulernodeinfo.Resource, pods []*v1.Pod, - usedPorts schedulercache.HostPortInfo, - imageStates map[string]*schedulercache.ImageStateSummary, -) *schedulercache.NodeInfo { - nodeInfo := schedulercache.NewNodeInfo(pods...) + usedPorts schedulernodeinfo.HostPortInfo, + imageStates map[string]*schedulernodeinfo.ImageStateSummary, +) *schedulernodeinfo.NodeInfo { + nodeInfo := schedulernodeinfo.NewNodeInfo(pods...) 
nodeInfo.SetRequestedResource(requestedResource) nodeInfo.SetNonZeroRequest(nonzeroRequest) nodeInfo.SetUsedPorts(usedPorts) @@ -108,98 +108,98 @@ func TestAssumePodScheduled(t *testing.T) { tests := []struct { pods []*v1.Pod - wNodeInfo *schedulercache.NodeInfo + wNodeInfo *schedulernodeinfo.NodeInfo }{{ pods: []*v1.Pod{testPods[0]}, wNodeInfo: newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[0]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }, { pods: []*v1.Pod{testPods[1], testPods[2]}, wNodeInfo: newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 300, Memory: 1524, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 300, Memory: 1524, }, []*v1.Pod{testPods[1], testPods[2]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }, { // test non-zero request pods: []*v1.Pod{testPods[3]}, wNodeInfo: newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 0, Memory: 0, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: priorityutil.DefaultMilliCPURequest, Memory: priorityutil.DefaultMemoryRequest, }, []*v1.Pod{testPods[3]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }, { pods: []*v1.Pod{testPods[4]}, wNodeInfo: newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3}, }, - 
&schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[4]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }, { pods: []*v1.Pod{testPods[4], testPods[5]}, wNodeInfo: newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 300, Memory: 1524, ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8}, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 300, Memory: 1524, }, []*v1.Pod{testPods[4], testPods[5]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }, { pods: []*v1.Pod{testPods[6]}, wNodeInfo: newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[6]}, newHostPortInfoBuilder().build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }, } @@ -253,7 +253,7 @@ func TestExpirePod(t *testing.T) { pods []*testExpirePodStruct cleanupTime time.Time - wNodeInfo *schedulercache.NodeInfo + wNodeInfo *schedulernodeinfo.NodeInfo }{{ // assumed pod would expires pods: []*testExpirePodStruct{ {pod: testPods[0], assumedTime: now}, @@ -267,17 +267,17 @@ func TestExpirePod(t *testing.T) { }, cleanupTime: now.Add(2 * ttl), wNodeInfo: newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 200, Memory: 1024, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 200, Memory: 1024, }, []*v1.Pod{testPods[1]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), - make(map[string]*schedulercache.ImageStateSummary), + 
make(map[string]*schedulernodeinfo.ImageStateSummary), ), }} @@ -313,22 +313,22 @@ func TestAddPodWillConfirm(t *testing.T) { podsToAssume []*v1.Pod podsToAdd []*v1.Pod - wNodeInfo *schedulercache.NodeInfo + wNodeInfo *schedulernodeinfo.NodeInfo }{{ // two pod were assumed at same time. But first one is called Add() and gets confirmed. podsToAssume: []*v1.Pod{testPods[0], testPods[1]}, podsToAdd: []*v1.Pod{testPods[0]}, wNodeInfo: newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[0]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }} @@ -405,25 +405,25 @@ func TestAddPodWillReplaceAssumed(t *testing.T) { podsToAdd []*v1.Pod podsToUpdate [][]*v1.Pod - wNodeInfo map[string]*schedulercache.NodeInfo + wNodeInfo map[string]*schedulernodeinfo.NodeInfo }{{ podsToAssume: []*v1.Pod{assumedPod.DeepCopy()}, podsToAdd: []*v1.Pod{addedPod.DeepCopy()}, podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}}, - wNodeInfo: map[string]*schedulercache.NodeInfo{ + wNodeInfo: map[string]*schedulernodeinfo.NodeInfo{ "assumed-node": nil, "actual-node": newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 200, Memory: 500, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 200, Memory: 500, }, []*v1.Pod{updatedPod.DeepCopy()}, newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }, }} @@ -463,21 +463,21 @@ func TestAddPodAfterExpiration(t *testing.T) { tests := []struct { pod *v1.Pod - wNodeInfo *schedulercache.NodeInfo + wNodeInfo *schedulernodeinfo.NodeInfo }{{ pod: basePod, wNodeInfo: newNodeInfo( - 
&schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{basePod}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }} @@ -516,34 +516,34 @@ func TestUpdatePod(t *testing.T) { podsToAdd []*v1.Pod podsToUpdate []*v1.Pod - wNodeInfo []*schedulercache.NodeInfo + wNodeInfo []*schedulernodeinfo.NodeInfo }{{ // add a pod and then update it twice podsToAdd: []*v1.Pod{testPods[0]}, podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]}, - wNodeInfo: []*schedulercache.NodeInfo{newNodeInfo( - &schedulercache.Resource{ + wNodeInfo: []*schedulernodeinfo.NodeInfo{newNodeInfo( + &schedulernodeinfo.Resource{ MilliCPU: 200, Memory: 1024, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 200, Memory: 1024, }, []*v1.Pod{testPods[1]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[0]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), )}, }} @@ -643,35 +643,35 @@ func TestExpireAddUpdatePod(t *testing.T) { podsToAdd []*v1.Pod podsToUpdate []*v1.Pod - wNodeInfo []*schedulercache.NodeInfo + wNodeInfo []*schedulernodeinfo.NodeInfo }{{ // Pod is assumed, expired, and added. Then it would be updated twice. 
podsToAssume: []*v1.Pod{testPods[0]}, podsToAdd: []*v1.Pod{testPods[0]}, podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]}, - wNodeInfo: []*schedulercache.NodeInfo{newNodeInfo( - &schedulercache.Resource{ + wNodeInfo: []*schedulernodeinfo.NodeInfo{newNodeInfo( + &schedulernodeinfo.Resource{ MilliCPU: 200, Memory: 1024, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 200, Memory: 1024, }, []*v1.Pod{testPods[1]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[0]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), )}, }} @@ -733,21 +733,21 @@ func TestEphemeralStorageResource(t *testing.T) { podE := makePodWithEphemeralStorage(nodeName, "500") tests := []struct { pod *v1.Pod - wNodeInfo *schedulercache.NodeInfo + wNodeInfo *schedulernodeinfo.NodeInfo }{ { pod: podE, wNodeInfo: newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ EphemeralStorage: 500, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: priorityutil.DefaultMilliCPURequest, Memory: priorityutil.DefaultMemoryRequest, }, []*v1.Pod{podE}, - schedulercache.HostPortInfo{}, - make(map[string]*schedulercache.ImageStateSummary), + schedulernodeinfo.HostPortInfo{}, + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }, } @@ -778,21 +778,21 @@ func TestRemovePod(t *testing.T) { basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}) tests := []struct { pod *v1.Pod - wNodeInfo 
*schedulercache.NodeInfo + wNodeInfo *schedulernodeinfo.NodeInfo }{{ pod: basePod, wNodeInfo: newNodeInfo( - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulercache.Resource{ + &schedulernodeinfo.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{basePod}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulercache.ImageStateSummary), + make(map[string]*schedulernodeinfo.ImageStateSummary), ), }} @@ -872,7 +872,7 @@ func TestForgetPod(t *testing.T) { // getResourceRequest returns the resource request of all containers in Pods; // excuding initContainers. func getResourceRequest(pod *v1.Pod) v1.ResourceList { - result := &schedulercache.Resource{} + result := &schedulernodeinfo.Resource{} for _, container := range pod.Spec.Containers { result.Add(container.Resources.Requests) } @@ -881,13 +881,13 @@ func getResourceRequest(pod *v1.Pod) v1.ResourceList { } // buildNodeInfo creates a NodeInfo by simulating node operations in cache. -func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulercache.NodeInfo { - expected := schedulercache.NewNodeInfo() +func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulernodeinfo.NodeInfo { + expected := schedulernodeinfo.NewNodeInfo() // Simulate SetNode. 
expected.SetNode(node) - expected.SetAllocatableResource(schedulercache.NewResource(node.Status.Allocatable)) + expected.SetAllocatableResource(schedulernodeinfo.NewResource(node.Status.Allocatable)) expected.SetTaints(node.Spec.Taints) expected.SetGeneration(expected.GetGeneration() + 1) @@ -1057,7 +1057,7 @@ func TestNodeOperators(t *testing.T) { if !found { t.Errorf("Failed to find node %v in schedulerinternalcache.", node.Name) } - if cache.nodeTree.NumNodes != 1 || cache.nodeTree.Next() != node.Name { + if cache.nodeTree.NumNodes() != 1 || cache.nodeTree.Next() != node.Name { t.Errorf("cache.nodeTree is not updated correctly after adding node: %v", node.Name) } @@ -1068,7 +1068,7 @@ func TestNodeOperators(t *testing.T) { } // Case 2: dump cached nodes successfully. - cachedNodes := map[string]*schedulercache.NodeInfo{} + cachedNodes := map[string]*schedulernodeinfo.NodeInfo{} cache.UpdateNodeNameToInfoMap(cachedNodes) newNode, found := cachedNodes[node.Name] if !found || len(cachedNodes) != 1 { @@ -1089,7 +1089,7 @@ func TestNodeOperators(t *testing.T) { cache.UpdateNode(nil, node) got, found = cache.nodes[node.Name] if !found { - t.Errorf("Failed to find node %v in schedulercache after UpdateNode.", node.Name) + t.Errorf("Failed to find node %v in schedulernodeinfo after UpdateNode.", node.Name) } if got.GetGeneration() <= expected.GetGeneration() { t.Errorf("Generation is not incremented. 
got: %v, expected: %v", got.GetGeneration(), expected.GetGeneration()) @@ -1097,10 +1097,10 @@ func TestNodeOperators(t *testing.T) { expected.SetGeneration(got.GetGeneration()) if !reflect.DeepEqual(got, expected) { - t.Errorf("Failed to update node in schedulercache:\n got: %+v \nexpected: %+v", got, expected) + t.Errorf("Failed to update node in schedulernodeinfo:\n got: %+v \nexpected: %+v", got, expected) } // Check nodeTree after update - if cache.nodeTree.NumNodes != 1 || cache.nodeTree.Next() != node.Name { + if cache.nodeTree.NumNodes() != 1 || cache.nodeTree.Next() != node.Name { t.Errorf("unexpected cache.nodeTree after updating node: %v", node.Name) } @@ -1111,7 +1111,7 @@ func TestNodeOperators(t *testing.T) { } // Check nodeTree after remove. The node should be removed from the nodeTree even if there are // still pods on it. - if cache.nodeTree.NumNodes != 0 || cache.nodeTree.Next() != "" { + if cache.nodeTree.NumNodes() != 0 || cache.nodeTree.Next() != "" { t.Errorf("unexpected cache.nodeTree after removing node: %v", node.Name) } } @@ -1131,7 +1131,7 @@ func BenchmarkUpdate1kNodes30kPods(b *testing.B) { cache := setupCacheOf1kNodes30kPods(b) b.ResetTimer() for n := 0; n < b.N; n++ { - cachedNodes := map[string]*schedulercache.NodeInfo{} + cachedNodes := map[string]*schedulernodeinfo.NodeInfo{} cache.UpdateNodeNameToInfoMap(cachedNodes) } } diff --git a/pkg/scheduler/internal/cache/debugger/BUILD b/pkg/scheduler/internal/cache/debugger/BUILD index 320c9734fba..1d9cfaf116b 100644 --- a/pkg/scheduler/internal/cache/debugger/BUILD +++ b/pkg/scheduler/internal/cache/debugger/BUILD @@ -10,9 +10,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger", visibility = ["//pkg/scheduler:__subpackages__"], deps = [ - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", 
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", @@ -25,7 +25,7 @@ go_test( srcs = ["comparer_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", ], diff --git a/pkg/scheduler/internal/cache/debugger/comparer.go b/pkg/scheduler/internal/cache/debugger/comparer.go index e78df11184a..434e7e511b4 100644 --- a/pkg/scheduler/internal/cache/debugger/comparer.go +++ b/pkg/scheduler/internal/cache/debugger/comparer.go @@ -24,9 +24,9 @@ import ( "k8s.io/apimachinery/pkg/labels" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/klog" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // CacheComparer is an implementation of the Scheduler's cache comparer. @@ -68,7 +68,7 @@ func (c *CacheComparer) Compare() error { } // CompareNodes compares actual nodes with cached nodes. -func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) { +func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulernodeinfo.NodeInfo) (missed, redundant []string) { actual := []string{} for _, node := range nodes { actual = append(actual, node.Name) @@ -83,7 +83,7 @@ func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*sch } // ComparePods compares actual pods with cached pods. 
-func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) { +func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulernodeinfo.NodeInfo) (missed, redundant []string) { actual := []string{} for _, pod := range pods { actual = append(actual, string(pod.UID)) diff --git a/pkg/scheduler/internal/cache/debugger/comparer_test.go b/pkg/scheduler/internal/cache/debugger/comparer_test.go index 967b4027b5d..ab1e1ee5e47 100644 --- a/pkg/scheduler/internal/cache/debugger/comparer_test.go +++ b/pkg/scheduler/internal/cache/debugger/comparer_test.go @@ -22,7 +22,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) func TestCompareNodes(t *testing.T) { @@ -72,9 +72,9 @@ func testCompareNodes(actual, cached, missing, redundant []string, t *testing.T) nodes = append(nodes, node) } - nodeInfo := make(map[string]*schedulercache.NodeInfo) + nodeInfo := make(map[string]*schedulernodeinfo.NodeInfo) for _, nodeName := range cached { - nodeInfo[nodeName] = &schedulercache.NodeInfo{} + nodeInfo[nodeName] = &schedulernodeinfo.NodeInfo{} } m, r := compare.CompareNodes(nodes, nodeInfo) @@ -170,14 +170,14 @@ func testComparePods(actual, cached, queued, missing, redundant []string, t *tes queuedPods = append(queuedPods, pod) } - nodeInfo := make(map[string]*schedulercache.NodeInfo) + nodeInfo := make(map[string]*schedulernodeinfo.NodeInfo) for _, uid := range cached { pod := &v1.Pod{} pod.UID = types.UID(uid) pod.Namespace = "ns" pod.Name = uid - nodeInfo[uid] = schedulercache.NewNodeInfo(pod) + nodeInfo[uid] = schedulernodeinfo.NewNodeInfo(pod) } m, r := compare.ComparePods(pods, queuedPods, nodeInfo) diff --git a/pkg/scheduler/internal/cache/debugger/dumper.go b/pkg/scheduler/internal/cache/debugger/dumper.go index 
b9084d377d9..5e94c06560e 100644 --- a/pkg/scheduler/internal/cache/debugger/dumper.go +++ b/pkg/scheduler/internal/cache/debugger/dumper.go @@ -23,9 +23,9 @@ import ( "k8s.io/klog" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/scheduler/cache" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/scheduler/internal/queue" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // CacheDumper writes some information from the scheduler cache and the scheduling queue to the @@ -61,7 +61,7 @@ func (d *CacheDumper) dumpSchedulingQueue() { } // printNodeInfo writes parts of NodeInfo to a string. -func printNodeInfo(n *cache.NodeInfo) string { +func printNodeInfo(n *schedulernodeinfo.NodeInfo) string { var nodeData strings.Builder nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nNumber of Pods: %v\nPods:\n", n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods()))) diff --git a/pkg/scheduler/internal/cache/fake/BUILD b/pkg/scheduler/internal/cache/fake/BUILD index ba642f5ea71..aab474548d7 100644 --- a/pkg/scheduler/internal/cache/fake/BUILD +++ b/pkg/scheduler/internal/cache/fake/BUILD @@ -6,8 +6,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake", visibility = ["//pkg/scheduler:__subpackages__"], deps = [ - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", ], diff --git a/pkg/scheduler/internal/cache/fake/fake_cache.go b/pkg/scheduler/internal/cache/fake/fake_cache.go index 82419e9600f..3188ff2fef6 100644 --- a/pkg/scheduler/internal/cache/fake/fake_cache.go +++ b/pkg/scheduler/internal/cache/fake/fake_cache.go @@ -19,8 +19,8 @@ package fake import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - 
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // Cache is used for testing @@ -75,7 +75,7 @@ func (c *Cache) UpdateNode(oldNode, newNode *v1.Node) error { return nil } func (c *Cache) RemoveNode(node *v1.Node) error { return nil } // UpdateNodeNameToInfoMap is a fake method for testing. -func (c *Cache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error { +func (c *Cache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulernodeinfo.NodeInfo) error { return nil } diff --git a/pkg/scheduler/internal/cache/interface.go b/pkg/scheduler/internal/cache/interface.go index 878c2aa0741..19088fd25ac 100644 --- a/pkg/scheduler/internal/cache/interface.go +++ b/pkg/scheduler/internal/cache/interface.go @@ -19,7 +19,7 @@ package cache import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) // PodFilter is a function to filter a pod. If pod passed return true else return false. @@ -100,7 +100,7 @@ type Cache interface { // UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache. // The node info contains aggregated information of pods scheduled (including assumed to be) // on this node. - UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error + UpdateNodeNameToInfoMap(infoMap map[string]*schedulernodeinfo.NodeInfo) error // List lists all cached pods (including assumed ones). 
List(labels.Selector) ([]*v1.Pod, error) @@ -118,5 +118,5 @@ type Cache interface { // Snapshot is a snapshot of cache state type Snapshot struct { AssumedPods map[string]bool - Nodes map[string]*schedulercache.NodeInfo + Nodes map[string]*schedulernodeinfo.NodeInfo } diff --git a/pkg/scheduler/internal/cache/main_test.go b/pkg/scheduler/internal/cache/main_test.go deleted file mode 100644 index e29bc63f43a..00000000000 --- a/pkg/scheduler/internal/cache/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/scheduler/internal/cache/node_tree.go b/pkg/scheduler/internal/cache/node_tree.go index 80ce6d195fa..f29024d0ed5 100644 --- a/pkg/scheduler/internal/cache/node_tree.go +++ b/pkg/scheduler/internal/cache/node_tree.go @@ -32,7 +32,7 @@ type NodeTree struct { tree map[string]*nodeArray // a map from zone (region-zone) to an array of nodes in the zone. 
zones []string // a list of all the zones in the tree (keys) zoneIndex int - NumNodes int + numNodes int mu sync.RWMutex } @@ -91,7 +91,7 @@ func (nt *NodeTree) addNode(n *v1.Node) { nt.tree[zone] = &nodeArray{nodes: []string{n.Name}, lastIndex: 0} } klog.V(5).Infof("Added node %v in group %v to NodeTree", n.Name, zone) - nt.NumNodes++ + nt.numNodes++ } // RemoveNode removes a node from the NodeTree. @@ -111,7 +111,7 @@ func (nt *NodeTree) removeNode(n *v1.Node) error { nt.removeZone(zone) } klog.V(5).Infof("Removed node %v in group %v from NodeTree", n.Name, zone) - nt.NumNodes-- + nt.numNodes-- return nil } } @@ -184,3 +184,10 @@ func (nt *NodeTree) Next() string { } } } + +// NumNodes returns the number of nodes. +func (nt *NodeTree) NumNodes() int { + nt.mu.RLock() + defer nt.mu.RUnlock() + return nt.numNodes +} diff --git a/pkg/scheduler/internal/cache/node_tree_test.go b/pkg/scheduler/internal/cache/node_tree_test.go index ee489870dd5..9b4371589fa 100644 --- a/pkg/scheduler/internal/cache/node_tree_test.go +++ b/pkg/scheduler/internal/cache/node_tree_test.go @@ -116,8 +116,8 @@ func verifyNodeTree(t *testing.T, nt *NodeTree, expectedTree map[string]*nodeArr for _, na := range expectedTree { expectedNumNodes += len(na.nodes) } - if nt.NumNodes != expectedNumNodes { - t.Errorf("unexpected NodeTree.numNodes. Expected: %v, Got: %v", expectedNumNodes, nt.NumNodes) + if numNodes := nt.NumNodes(); numNodes != expectedNumNodes { + t.Errorf("unexpected NodeTree.numNodes. Expected: %v, Got: %v", expectedNumNodes, numNodes) } if !reflect.DeepEqual(nt.tree, expectedTree) { t.Errorf("The node tree is not the same as expected. 
Expected: %v, Got: %v", expectedTree, nt.tree) diff --git a/pkg/scheduler/internal/queue/BUILD b/pkg/scheduler/internal/queue/BUILD index c675f859248..ea7b66fe3f4 100644 --- a/pkg/scheduler/internal/queue/BUILD +++ b/pkg/scheduler/internal/queue/BUILD @@ -12,6 +12,8 @@ go_library( "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], @@ -22,9 +24,11 @@ go_test( srcs = ["scheduling_queue_test.go"], embed = [":go_default_library"], deps = [ + "//pkg/api/v1/pod:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", ], ) diff --git a/pkg/scheduler/internal/queue/scheduling_queue.go b/pkg/scheduler/internal/queue/scheduling_queue.go index 6f5aa682c98..cb185fbf0bb 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue.go +++ b/pkg/scheduler/internal/queue/scheduling_queue.go @@ -27,15 +27,17 @@ limitations under the License. 
package queue import ( - "container/heap" "fmt" "reflect" "sync" + "time" "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ktypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" @@ -69,13 +71,15 @@ type SchedulingQueue interface { Close() // DeleteNominatedPodIfExists deletes nominatedPod from internal cache DeleteNominatedPodIfExists(pod *v1.Pod) + // NumUnschedulablePods returns the number of unschedulable pods exist in the SchedulingQueue. + NumUnschedulablePods() int } // NewSchedulingQueue initializes a new scheduling queue. If pod priority is // enabled a priority queue is returned. If it is disabled, a FIFO is returned. -func NewSchedulingQueue() SchedulingQueue { +func NewSchedulingQueue(stop <-chan struct{}) SchedulingQueue { if util.PodPriorityEnabled() { - return NewPriorityQueue() + return NewPriorityQueue(stop) } return NewFIFO() } @@ -162,6 +166,11 @@ func (f *FIFO) Close() { // DeleteNominatedPodIfExists does nothing in FIFO. func (f *FIFO) DeleteNominatedPodIfExists(pod *v1.Pod) {} +// NumUnschedulablePods returns the number of unschedulable pods exist in the SchedulingQueue. +func (f *FIFO) NumUnschedulablePods() int { + return 0 +} + // NewFIFO creates a FIFO object. func NewFIFO() *FIFO { return &FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)} @@ -179,12 +188,20 @@ func NominatedNodeName(pod *v1.Pod) string { // pods that are already tried and are determined to be unschedulable. The latter // is called unschedulableQ. type PriorityQueue struct { + stop <-chan struct{} + clock util.Clock + // podBackoff tracks backoff for pods attempting to be rescheduled + podBackoff *util.PodBackoff + lock sync.RWMutex cond sync.Cond // activeQ is heap structure that scheduler actively looks at to find pods to // schedule. Head of heap is the highest priority pod. 
- activeQ *Heap + activeQ *util.Heap + // podBackoffQ is a heap ordered by backoff expiry. Pods which have completed backoff + // are popped from this heap before the scheduler looks at activeQ + podBackoffQ *util.Heap // unschedulableQ holds pods that have been tried and determined unschedulable. unschedulableQ *UnschedulablePodsMap // nominatedPods is a map keyed by a node name and the value is a list of @@ -206,46 +223,87 @@ type PriorityQueue struct { // Making sure that PriorityQueue implements SchedulingQueue. var _ = SchedulingQueue(&PriorityQueue{}) +// podTimeStamp returns pod's last schedule time or its creation time if the +// scheduler has never tried scheduling it. +func podTimestamp(pod *v1.Pod) *metav1.Time { + _, condition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) + if condition == nil { + return &pod.CreationTimestamp + } + return &condition.LastTransitionTime +} + +// activeQComp is the function used by the activeQ heap algorithm to sort pods. +// It sorts pods based on their priority. When priorities are equal, it uses +// podTimestamp. +func activeQComp(pod1, pod2 interface{}) bool { + p1 := pod1.(*v1.Pod) + p2 := pod2.(*v1.Pod) + prio1 := util.GetPodPriority(p1) + prio2 := util.GetPodPriority(p2) + return (prio1 > prio2) || (prio1 == prio2 && podTimestamp(p1).Before(podTimestamp(p2))) +} + // NewPriorityQueue creates a PriorityQueue object. -func NewPriorityQueue() *PriorityQueue { +func NewPriorityQueue(stop <-chan struct{}) *PriorityQueue { + return NewPriorityQueueWithClock(stop, util.RealClock{}) +} + +// NewPriorityQueueWithClock creates a PriorityQueue which uses the passed clock for time. 
+func NewPriorityQueueWithClock(stop <-chan struct{}, clock util.Clock) *PriorityQueue { pq := &PriorityQueue{ - activeQ: newHeap(cache.MetaNamespaceKeyFunc, util.HigherPriorityPod), + clock: clock, + stop: stop, + podBackoff: util.CreatePodBackoffWithClock(1*time.Second, 10*time.Second, clock), + activeQ: util.NewHeap(cache.MetaNamespaceKeyFunc, activeQComp), unschedulableQ: newUnschedulablePodsMap(), nominatedPods: map[string][]*v1.Pod{}, } pq.cond.L = &pq.lock + pq.podBackoffQ = util.NewHeap(cache.MetaNamespaceKeyFunc, pq.podsCompareBackoffCompleted) + + pq.run() + return pq } +// run starts the goroutine to pump from podBackoffQ to activeQ +func (p *PriorityQueue) run() { + go wait.Until(p.flushBackoffQCompleted, 1.0*time.Second, p.stop) +} + // addNominatedPodIfNeeded adds a pod to nominatedPods if it has a NominatedNodeName and it does not // already exist in the map. Adding an existing pod is not going to update the pod. func (p *PriorityQueue) addNominatedPodIfNeeded(pod *v1.Pod) { nnn := NominatedNodeName(pod) - if len(nnn) > 0 { - for _, np := range p.nominatedPods[nnn] { - if np.UID == pod.UID { - klog.V(4).Infof("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name) - return - } - } - p.nominatedPods[nnn] = append(p.nominatedPods[nnn], pod) + if len(nnn) <= 0 { + return } + for _, np := range p.nominatedPods[nnn] { + if np.UID == pod.UID { + klog.V(4).Infof("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name) + return + } + } + p.nominatedPods[nnn] = append(p.nominatedPods[nnn], pod) } // deleteNominatedPodIfExists deletes a pod from the nominatedPods. // NOTE: this function assumes lock has been acquired in caller. func (p *PriorityQueue) deleteNominatedPodIfExists(pod *v1.Pod) { nnn := NominatedNodeName(pod) - if len(nnn) > 0 { - for i, np := range p.nominatedPods[nnn] { - if np.UID == pod.UID { - p.nominatedPods[nnn] = append(p.nominatedPods[nnn][:i], p.nominatedPods[nnn][i+1:]...) 
- if len(p.nominatedPods[nnn]) == 0 { - delete(p.nominatedPods, nnn) - } - break - } + if len(nnn) <= 0 { + return + } + for i, np := range p.nominatedPods[nnn] { + if np.UID != pod.UID { + continue } + p.nominatedPods[nnn] = append(p.nominatedPods[nnn][:i], p.nominatedPods[nnn][i+1:]...) + if len(p.nominatedPods[nnn]) == 0 { + delete(p.nominatedPods, nnn) + } + break } } @@ -258,23 +316,27 @@ func (p *PriorityQueue) updateNominatedPod(oldPod, newPod *v1.Pod) { } // Add adds a pod to the active queue. It should be called only when a new pod -// is added so there is no chance the pod is already in either queue. +// is added so there is no chance the pod is already in active/unschedulable/backoff queues func (p *PriorityQueue) Add(pod *v1.Pod) error { p.lock.Lock() defer p.lock.Unlock() - err := p.activeQ.Add(pod) - if err != nil { + if err := p.activeQ.Add(pod); err != nil { klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) - } else { - if p.unschedulableQ.get(pod) != nil { - klog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name) - p.deleteNominatedPodIfExists(pod) - p.unschedulableQ.delete(pod) - } - p.addNominatedPodIfNeeded(pod) - p.cond.Broadcast() + return err } - return err + if p.unschedulableQ.get(pod) != nil { + klog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name) + p.deleteNominatedPodIfExists(pod) + p.unschedulableQ.delete(pod) + } + // Delete pod from backoffQ if it is backing off + if err := p.podBackoffQ.Delete(pod); err == nil { + klog.Errorf("Error: pod %v/%v is already in the podBackoff queue.", pod.Namespace, pod.Name) + } + p.addNominatedPodIfNeeded(pod) + p.cond.Broadcast() + + return nil } // AddIfNotPresent adds a pod to the active queue if it is not present in any of @@ -288,6 +350,9 @@ func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error { if _, exists, _ := p.activeQ.Get(pod); exists { return nil } + if _, 
exists, _ := p.podBackoffQ.Get(pod); exists { + return nil + } err := p.activeQ.Add(pod) if err != nil { klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) @@ -303,6 +368,40 @@ func isPodUnschedulable(pod *v1.Pod) bool { return cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable } +// nsNameForPod returns a namespacedname for a pod +func nsNameForPod(pod *v1.Pod) ktypes.NamespacedName { + return ktypes.NamespacedName{ + Namespace: pod.Namespace, + Name: pod.Name, + } +} + +// clearPodBackoff clears all backoff state for a pod (resets expiry) +func (p *PriorityQueue) clearPodBackoff(pod *v1.Pod) { + p.podBackoff.ClearPodBackoff(nsNameForPod(pod)) +} + +// isPodBackingOff returns whether a pod is currently undergoing backoff in the podBackoff structure +func (p *PriorityQueue) isPodBackingOff(pod *v1.Pod) bool { + boTime, exists := p.podBackoff.GetBackoffTime(nsNameForPod(pod)) + if !exists { + return false + } + return boTime.After(p.clock.Now()) +} + +// backoffPod checks if pod is currently undergoing backoff. If it is not it updates the backoff +// timeout otherwise it does nothing. +func (p *PriorityQueue) backoffPod(pod *v1.Pod) { + p.podBackoff.Gc() + + podID := nsNameForPod(pod) + boTime, found := p.podBackoff.GetBackoffTime(podID) + if !found || boTime.Before(p.clock.Now()) { + p.podBackoff.BackoffPod(podID) + } +} + // AddUnschedulableIfNotPresent does nothing if the pod is present in either // queue. Otherwise it adds the pod to the unschedulable queue if // p.receivedMoveRequest is false, and to the activeQ if p.receivedMoveRequest is true. 
@@ -315,11 +414,27 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod) error { if _, exists, _ := p.activeQ.Get(pod); exists { return fmt.Errorf("pod is already present in the activeQ") } + if _, exists, _ := p.podBackoffQ.Get(pod); exists { + return fmt.Errorf("pod is already present in the backoffQ") + } if !p.receivedMoveRequest && isPodUnschedulable(pod) { + p.backoffPod(pod) p.unschedulableQ.addOrUpdate(pod) p.addNominatedPodIfNeeded(pod) return nil } + + // If a move request has been received and the pod is subject to backoff, move it to the BackoffQ. + if p.isPodBackingOff(pod) && isPodUnschedulable(pod) { + err := p.podBackoffQ.Add(pod) + if err != nil { + klog.Errorf("Error adding pod %v to the backoff queue: %v", pod.Name, err) + } else { + p.addNominatedPodIfNeeded(pod) + } + return err + } + err := p.activeQ.Add(pod) if err == nil { p.addNominatedPodIfNeeded(pod) @@ -328,13 +443,46 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod) error { return err } +// flushBackoffQCompleted Moves all pods from backoffQ which have completed backoff in to activeQ +func (p *PriorityQueue) flushBackoffQCompleted() { + p.lock.Lock() + defer p.lock.Unlock() + + for { + rawPod := p.podBackoffQ.Peek() + if rawPod == nil { + return + } + pod := rawPod.(*v1.Pod) + boTime, found := p.podBackoff.GetBackoffTime(nsNameForPod(pod)) + if !found { + klog.Errorf("Unable to find backoff value for pod %v in backoffQ", nsNameForPod(pod)) + p.podBackoffQ.Pop() + p.activeQ.Add(pod) + defer p.cond.Broadcast() + continue + } + + if boTime.After(p.clock.Now()) { + return + } + _, err := p.podBackoffQ.Pop() + if err != nil { + klog.Errorf("Unable to pop pod %v from backoffQ despite backoff completion.", nsNameForPod(pod)) + return + } + p.activeQ.Add(pod) + defer p.cond.Broadcast() + } +} + // Pop removes the head of the active queue and returns it. It blocks if the // activeQ is empty and waits until a new item is added to the queue. 
It also // clears receivedMoveRequest to mark the beginning of a new scheduling cycle. func (p *PriorityQueue) Pop() (*v1.Pod, error) { p.lock.Lock() defer p.lock.Unlock() - for len(p.activeQ.data.queue) == 0 { + for p.activeQ.Len() == 0 { // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. // When Close() is called, the p.closed is set and the condition is broadcast, // which causes this loop to continue and return from the Pop(). @@ -371,16 +519,33 @@ func isPodUpdated(oldPod, newPod *v1.Pod) bool { func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { p.lock.Lock() defer p.lock.Unlock() - // If the pod is already in the active queue, just update it there. - if _, exists, _ := p.activeQ.Get(newPod); exists { - p.updateNominatedPod(oldPod, newPod) - err := p.activeQ.Update(newPod) - return err + + if oldPod != nil { + // If the pod is already in the active queue, just update it there. + if _, exists, _ := p.activeQ.Get(oldPod); exists { + p.updateNominatedPod(oldPod, newPod) + err := p.activeQ.Update(newPod) + return err + } + + // If the pod is in the backoff queue, update it there. + if _, exists, _ := p.podBackoffQ.Get(oldPod); exists { + p.updateNominatedPod(oldPod, newPod) + p.podBackoffQ.Delete(newPod) + err := p.activeQ.Add(newPod) + if err == nil { + p.cond.Broadcast() + } + return err + } } + // If the pod is in the unschedulable queue, updating it may make it schedulable. 
if usPod := p.unschedulableQ.get(newPod); usPod != nil { p.updateNominatedPod(oldPod, newPod) if isPodUpdated(oldPod, newPod) { + // If the pod is updated reset backoff + p.clearPodBackoff(newPod) p.unschedulableQ.delete(usPod) err := p.activeQ.Add(newPod) if err == nil { @@ -388,6 +553,7 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { } return err } + // Pod is already in unschedulable queue and hasnt updated, no need to backoff again p.unschedulableQ.addOrUpdate(newPod) return nil } @@ -408,6 +574,8 @@ func (p *PriorityQueue) Delete(pod *v1.Pod) error { p.deleteNominatedPodIfExists(pod) err := p.activeQ.Delete(pod) if err != nil { // The item was probably not found in the activeQ. + p.clearPodBackoff(pod) + p.podBackoffQ.Delete(pod) p.unschedulableQ.delete(pod) } return nil @@ -433,16 +601,18 @@ func (p *PriorityQueue) AssignedPodUpdated(pod *v1.Pod) { // function adds all pods and then signals the condition variable to ensure that // if Pop() is waiting for an item, it receives it after all the pods are in the // queue and the head is the highest priority pod. -// TODO(bsalamat): We should add a back-off mechanism here so that a high priority -// pod which is unschedulable does not go to the head of the queue frequently. For -// example in a cluster where a lot of pods being deleted, such a high priority -// pod can deprive other pods from getting scheduled. 
func (p *PriorityQueue) MoveAllToActiveQueue() { p.lock.Lock() defer p.lock.Unlock() for _, pod := range p.unschedulableQ.pods { - if err := p.activeQ.Add(pod); err != nil { - klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) + if p.isPodBackingOff(pod) { + if err := p.podBackoffQ.Add(pod); err != nil { + klog.Errorf("Error adding pod %v to the backoff queue: %v", pod.Name, err) + } + } else { + if err := p.activeQ.Add(pod); err != nil { + klog.Errorf("Error adding pod %v to the scheduling queue: %v", pod.Name, err) + } } } p.unschedulableQ.clear() @@ -453,11 +623,16 @@ // NOTE: this function assumes lock has been acquired in caller func (p *PriorityQueue) movePodsToActiveQueue(pods []*v1.Pod) { for _, pod := range pods { - if err := p.activeQ.Add(pod); err == nil { - p.unschedulableQ.delete(pod) + if p.isPodBackingOff(pod) { + if err := p.podBackoffQ.Add(pod); err != nil { + klog.Errorf("Error adding pod %v to the backoff queue: %v", pod.Name, err) + } } else { - klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) + if err := p.activeQ.Add(pod); err != nil { + klog.Errorf("Error adding pod %v to the scheduling queue: %v", pod.Name, err) + } } + p.unschedulableQ.delete(pod) } p.receivedMoveRequest = true p.cond.Broadcast() @@ -530,6 +705,19 @@ func (p *PriorityQueue) DeleteNominatedPodIfExists(pod *v1.Pod) { p.lock.Unlock() } +func (p *PriorityQueue) podsCompareBackoffCompleted(p1, p2 interface{}) bool { + bo1, _ := p.podBackoff.GetBackoffTime(nsNameForPod(p1.(*v1.Pod))) + bo2, _ := p.podBackoff.GetBackoffTime(nsNameForPod(p2.(*v1.Pod))) + return bo1.Before(bo2) +} + +// NumUnschedulablePods returns the number of unschedulable pods that exist in the SchedulingQueue. 
+func (p *PriorityQueue) NumUnschedulablePods() int { + p.lock.RLock() + defer p.lock.RUnlock() + return len(p.unschedulableQ.pods) +} + // UnschedulablePodsMap holds pods that cannot be scheduled. This data structure // is used to implement unschedulableQ. type UnschedulablePodsMap struct { @@ -570,200 +758,3 @@ func newUnschedulablePodsMap() *UnschedulablePodsMap { keyFunc: util.GetPodFullName, } } - -// Below is the implementation of the a heap. The logic is pretty much the same -// as cache.heap, however, this heap does not perform synchronization. It leaves -// synchronization to the SchedulingQueue. - -// LessFunc is a function type to compare two objects. -type LessFunc func(interface{}, interface{}) bool - -// KeyFunc is a function type to get the key from an object. -type KeyFunc func(obj interface{}) (string, error) - -type heapItem struct { - obj interface{} // The object which is stored in the heap. - index int // The index of the object's key in the Heap.queue. -} - -type itemKeyValue struct { - key string - obj interface{} -} - -// heapData is an internal struct that implements the standard heap interface -// and keeps the data stored in the heap. -type heapData struct { - // items is a map from key of the objects to the objects and their index. - // We depend on the property that items in the map are in the queue and vice versa. - items map[string]*heapItem - // queue implements a heap data structure and keeps the order of elements - // according to the heap invariant. The queue keeps the keys of objects stored - // in "items". - queue []string - - // keyFunc is used to make the key used for queued item insertion and retrieval, and - // should be deterministic. - keyFunc KeyFunc - // lessFunc is used to compare two objects in the heap. 
- lessFunc LessFunc -} - -var ( - _ = heap.Interface(&heapData{}) // heapData is a standard heap -) - -// Less compares two objects and returns true if the first one should go -// in front of the second one in the heap. -func (h *heapData) Less(i, j int) bool { - if i > len(h.queue) || j > len(h.queue) { - return false - } - itemi, ok := h.items[h.queue[i]] - if !ok { - return false - } - itemj, ok := h.items[h.queue[j]] - if !ok { - return false - } - return h.lessFunc(itemi.obj, itemj.obj) -} - -// Len returns the number of items in the Heap. -func (h *heapData) Len() int { return len(h.queue) } - -// Swap implements swapping of two elements in the heap. This is a part of standard -// heap interface and should never be called directly. -func (h *heapData) Swap(i, j int) { - h.queue[i], h.queue[j] = h.queue[j], h.queue[i] - item := h.items[h.queue[i]] - item.index = i - item = h.items[h.queue[j]] - item.index = j -} - -// Push is supposed to be called by heap.Push only. -func (h *heapData) Push(kv interface{}) { - keyValue := kv.(*itemKeyValue) - n := len(h.queue) - h.items[keyValue.key] = &heapItem{keyValue.obj, n} - h.queue = append(h.queue, keyValue.key) -} - -// Pop is supposed to be called by heap.Pop only. -func (h *heapData) Pop() interface{} { - key := h.queue[len(h.queue)-1] - h.queue = h.queue[0 : len(h.queue)-1] - item, ok := h.items[key] - if !ok { - // This is an error - return nil - } - delete(h.items, key) - return item.obj -} - -// Heap is a producer/consumer queue that implements a heap data structure. -// It can be used to implement priority queues and similar data structures. -type Heap struct { - // data stores objects and has a queue that keeps their ordering according - // to the heap invariant. - data *heapData -} - -// Add inserts an item, and puts it in the queue. The item is updated if it -// already exists. 
-func (h *Heap) Add(obj interface{}) error { - key, err := h.data.keyFunc(obj) - if err != nil { - return cache.KeyError{Obj: obj, Err: err} - } - if _, exists := h.data.items[key]; exists { - h.data.items[key].obj = obj - heap.Fix(h.data, h.data.items[key].index) - } else { - heap.Push(h.data, &itemKeyValue{key, obj}) - } - return nil -} - -// AddIfNotPresent inserts an item, and puts it in the queue. If an item with -// the key is present in the map, no changes is made to the item. -func (h *Heap) AddIfNotPresent(obj interface{}) error { - key, err := h.data.keyFunc(obj) - if err != nil { - return cache.KeyError{Obj: obj, Err: err} - } - if _, exists := h.data.items[key]; !exists { - heap.Push(h.data, &itemKeyValue{key, obj}) - } - return nil -} - -// Update is the same as Add in this implementation. When the item does not -// exist, it is added. -func (h *Heap) Update(obj interface{}) error { - return h.Add(obj) -} - -// Delete removes an item. -func (h *Heap) Delete(obj interface{}) error { - key, err := h.data.keyFunc(obj) - if err != nil { - return cache.KeyError{Obj: obj, Err: err} - } - if item, ok := h.data.items[key]; ok { - heap.Remove(h.data, item.index) - return nil - } - return fmt.Errorf("object not found") -} - -// Pop returns the head of the heap. -func (h *Heap) Pop() (interface{}, error) { - obj := heap.Pop(h.data) - if obj != nil { - return obj, nil - } - return nil, fmt.Errorf("object was removed from heap data") -} - -// Get returns the requested item, or sets exists=false. -func (h *Heap) Get(obj interface{}) (interface{}, bool, error) { - key, err := h.data.keyFunc(obj) - if err != nil { - return nil, false, cache.KeyError{Obj: obj, Err: err} - } - return h.GetByKey(key) -} - -// GetByKey returns the requested item, or sets exists=false. 
-func (h *Heap) GetByKey(key string) (interface{}, bool, error) { - item, exists := h.data.items[key] - if !exists { - return nil, false, nil - } - return item.obj, true, nil -} - -// List returns a list of all the items. -func (h *Heap) List() []interface{} { - list := make([]interface{}, 0, len(h.data.items)) - for _, item := range h.data.items { - list = append(list, item.obj) - } - return list -} - -// newHeap returns a Heap which can be used to queue up items to process. -func newHeap(keyFn KeyFunc, lessFn LessFunc) *Heap { - return &Heap{ - data: &heapData{ - items: map[string]*heapItem{}, - queue: []string{}, - keyFunc: keyFn, - lessFunc: lessFn, - }, - } -} diff --git a/pkg/scheduler/internal/queue/scheduling_queue_test.go b/pkg/scheduler/internal/queue/scheduling_queue_test.go index ca0e3369656..60c0d7b30d6 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue_test.go +++ b/pkg/scheduler/internal/queue/scheduling_queue_test.go @@ -24,6 +24,8 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -93,10 +95,16 @@ var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1. 
} func TestPriorityQueue_Add(t *testing.T) { - q := NewPriorityQueue() - q.Add(&medPriorityPod) - q.Add(&unschedulablePod) - q.Add(&highPriorityPod) + q := NewPriorityQueue(nil) + if err := q.Add(&medPriorityPod); err != nil { + t.Errorf("add failed: %v", err) + } + if err := q.Add(&unschedulablePod); err != nil { + t.Errorf("add failed: %v", err) + } + if err := q.Add(&highPriorityPod); err != nil { + t.Errorf("add failed: %v", err) + } expectedNominatedPods := map[string][]*v1.Pod{ "node1": {&medPriorityPod, &unschedulablePod}, } @@ -118,7 +126,7 @@ func TestPriorityQueue_Add(t *testing.T) { } func TestPriorityQueue_AddIfNotPresent(t *testing.T) { - q := NewPriorityQueue() + q := NewPriorityQueue(nil) q.unschedulableQ.addOrUpdate(&highPriNominatedPod) q.AddIfNotPresent(&highPriNominatedPod) // Must not add anything. q.AddIfNotPresent(&medPriorityPod) @@ -144,7 +152,7 @@ func TestPriorityQueue_AddIfNotPresent(t *testing.T) { } func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) { - q := NewPriorityQueue() + q := NewPriorityQueue(nil) q.Add(&highPriNominatedPod) q.AddUnschedulableIfNotPresent(&highPriNominatedPod) // Must not add anything. q.AddUnschedulableIfNotPresent(&medPriorityPod) // This should go to activeQ. @@ -170,7 +178,7 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) { } func TestPriorityQueue_Pop(t *testing.T) { - q := NewPriorityQueue() + q := NewPriorityQueue(nil) wg := sync.WaitGroup{} wg.Add(1) go func() { @@ -187,7 +195,7 @@ func TestPriorityQueue_Pop(t *testing.T) { } func TestPriorityQueue_Update(t *testing.T) { - q := NewPriorityQueue() + q := NewPriorityQueue(nil) q.Update(nil, &highPriorityPod) if _, exists, _ := q.activeQ.Get(&highPriorityPod); !exists { t.Errorf("Expected %v to be added to activeQ.", highPriorityPod.Name) @@ -197,7 +205,7 @@ func TestPriorityQueue_Update(t *testing.T) { } // Update highPriorityPod and add a nominatedNodeName to it. 
q.Update(&highPriorityPod, &highPriNominatedPod) - if q.activeQ.data.Len() != 1 { + if q.activeQ.Len() != 1 { t.Error("Expected only one item in activeQ.") } if len(q.nominatedPods) != 1 { @@ -223,10 +231,12 @@ func TestPriorityQueue_Update(t *testing.T) { } func TestPriorityQueue_Delete(t *testing.T) { - q := NewPriorityQueue() + q := NewPriorityQueue(nil) q.Update(&highPriorityPod, &highPriNominatedPod) q.Add(&unschedulablePod) - q.Delete(&highPriNominatedPod) + if err := q.Delete(&highPriNominatedPod); err != nil { + t.Errorf("delete failed: %v", err) + } if _, exists, _ := q.activeQ.Get(&unschedulablePod); !exists { t.Errorf("Expected %v to be in activeQ.", unschedulablePod.Name) } @@ -236,19 +246,21 @@ func TestPriorityQueue_Delete(t *testing.T) { if len(q.nominatedPods) != 1 { t.Errorf("Expected nomindatePods to have only 'unschedulablePod': %v", q.nominatedPods) } - q.Delete(&unschedulablePod) + if err := q.Delete(&unschedulablePod); err != nil { + t.Errorf("delete failed: %v", err) + } if len(q.nominatedPods) != 0 { t.Errorf("Expected nomindatePods to be empty: %v", q.nominatedPods) } } func TestPriorityQueue_MoveAllToActiveQueue(t *testing.T) { - q := NewPriorityQueue() + q := NewPriorityQueue(nil) q.Add(&medPriorityPod) q.unschedulableQ.addOrUpdate(&unschedulablePod) q.unschedulableQ.addOrUpdate(&highPriorityPod) q.MoveAllToActiveQueue() - if q.activeQ.data.Len() != 3 { + if q.activeQ.Len() != 3 { t.Error("Expected all items to be in activeQ.") } } @@ -289,7 +301,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) { Spec: v1.PodSpec{NodeName: "machine1"}, } - q := NewPriorityQueue() + q := NewPriorityQueue(nil) q.Add(&medPriorityPod) // Add a couple of pods to the unschedulableQ. 
q.unschedulableQ.addOrUpdate(&unschedulablePod) @@ -310,7 +322,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) { } func TestPriorityQueue_WaitingPodsForNode(t *testing.T) { - q := NewPriorityQueue() + q := NewPriorityQueue(nil) q.Add(&medPriorityPod) q.Add(&unschedulablePod) q.Add(&highPriorityPod) @@ -489,7 +501,7 @@ func TestSchedulingQueue_Close(t *testing.T) { }, { name: "PriorityQueue close", - q: NewPriorityQueue(), + q: NewPriorityQueue(nil), expectedErr: fmt.Errorf(queueClosed), }, } @@ -512,3 +524,119 @@ func TestSchedulingQueue_Close(t *testing.T) { }) } } + +// TestRecentlyTriedPodsGoBack tests that pods which are recently tried and are +// unschedulable go behind other pods with the same priority. This behavior +// ensures that an unschedulable pod does not block head of the queue when there +// are frequent events that move pods to the active queue. +func TestRecentlyTriedPodsGoBack(t *testing.T) { + q := NewPriorityQueue(nil) + // Add a few pods to priority queue. + for i := 0; i < 5; i++ { + p := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("test-pod-%v", i), + Namespace: "ns1", + UID: types.UID(fmt.Sprintf("tp00%v", i)), + }, + Spec: v1.PodSpec{ + Priority: &highPriority, + }, + Status: v1.PodStatus{ + NominatedNodeName: "node1", + }, + } + q.Add(&p) + } + // Simulate a pod being popped by the scheduler, determined unschedulable, and + // then moved back to the active queue. + p1, err := q.Pop() + if err != nil { + t.Errorf("Error while popping the head of the queue: %v", err) + } + // Update pod condition to unschedulable. + podutil.UpdatePodCondition(&p1.Status, &v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + Reason: v1.PodReasonUnschedulable, + Message: "fake scheduling failure", + }) + // Put in the unschedulable queue. + q.AddUnschedulableIfNotPresent(p1) + // Move all unschedulable pods to the active queue. + q.MoveAllToActiveQueue() + // Simulation is over. Now let's pop all pods. 
The pod popped first should be + // the last one we pop here. + for i := 0; i < 5; i++ { + p, err := q.Pop() + if err != nil { + t.Errorf("Error while popping pods from the queue: %v", err) + } + if (i == 4) != (p1 == p) { + t.Errorf("A pod tried before is not the last pod popped: i: %v, pod name: %v", i, p.Name) + } + } +} + +// TestHighPriorityBackoff tests that a high priority pod does not block +// other pods if it is unschedulable +func TestHighProirotyBackoff(t *testing.T) { + q := NewPriorityQueue(nil) + + midPod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-midpod", + Namespace: "ns1", + UID: types.UID("tp-mid"), + }, + Spec: v1.PodSpec{ + Priority: &midPriority, + }, + Status: v1.PodStatus{ + NominatedNodeName: "node1", + }, + } + highPod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-highpod", + Namespace: "ns1", + UID: types.UID("tp-high"), + }, + Spec: v1.PodSpec{ + Priority: &highPriority, + }, + Status: v1.PodStatus{ + NominatedNodeName: "node1", + }, + } + q.Add(&midPod) + q.Add(&highPod) + // Simulate a pod being popped by the scheduler, determined unschedulable, and + // then moved back to the active queue. + p, err := q.Pop() + if err != nil { + t.Errorf("Error while popping the head of the queue: %v", err) + } + if p != &highPod { + t.Errorf("Expected to get high prority pod, got: %v", p) + } + // Update pod condition to unschedulable. + podutil.UpdatePodCondition(&p.Status, &v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + Reason: v1.PodReasonUnschedulable, + Message: "fake scheduling failure", + }) + // Put in the unschedulable queue. + q.AddUnschedulableIfNotPresent(p) + // Move all unschedulable pods to the active queue. 
+ q.MoveAllToActiveQueue() + + p, err = q.Pop() + if err != nil { + t.Errorf("Error while popping the head of the queue: %v", err) + } + if p != &midPod { + t.Errorf("Expected to get mid prority pod, got: %v", p) + } +} diff --git a/pkg/scheduler/main_test.go b/pkg/scheduler/main_test.go deleted file mode 100644 index 7644879e952..00000000000 --- a/pkg/scheduler/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheduler - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/scheduler/cache/BUILD b/pkg/scheduler/nodeinfo/BUILD similarity index 96% rename from pkg/scheduler/cache/BUILD rename to pkg/scheduler/nodeinfo/BUILD index ba0ac4da9a7..c9631738f6f 100644 --- a/pkg/scheduler/cache/BUILD +++ b/pkg/scheduler/nodeinfo/BUILD @@ -7,7 +7,7 @@ go_library( "node_info.go", "util.go", ], - importpath = "k8s.io/kubernetes/pkg/scheduler/cache", + importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo", visibility = ["//visibility:public"], deps = [ "//pkg/apis/core/v1/helper:go_default_library", diff --git a/pkg/scheduler/cache/host_ports.go b/pkg/scheduler/nodeinfo/host_ports.go similarity index 99% rename from 
pkg/scheduler/cache/host_ports.go rename to pkg/scheduler/nodeinfo/host_ports.go index e96c6be374a..8f1090ff706 100644 --- a/pkg/scheduler/cache/host_ports.go +++ b/pkg/scheduler/nodeinfo/host_ports.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cache +package nodeinfo import ( "k8s.io/api/core/v1" diff --git a/pkg/scheduler/cache/host_ports_test.go b/pkg/scheduler/nodeinfo/host_ports_test.go similarity index 99% rename from pkg/scheduler/cache/host_ports_test.go rename to pkg/scheduler/nodeinfo/host_ports_test.go index 390d4d1510f..53a1c4ebbf0 100644 --- a/pkg/scheduler/cache/host_ports_test.go +++ b/pkg/scheduler/nodeinfo/host_ports_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cache +package nodeinfo import ( "testing" diff --git a/pkg/scheduler/cache/node_info.go b/pkg/scheduler/nodeinfo/node_info.go similarity index 99% rename from pkg/scheduler/cache/node_info.go rename to pkg/scheduler/nodeinfo/node_info.go index 8b623c72ca3..6a6703a4fa6 100644 --- a/pkg/scheduler/cache/node_info.go +++ b/pkg/scheduler/nodeinfo/node_info.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cache +package nodeinfo import ( "errors" diff --git a/pkg/scheduler/cache/node_info_test.go b/pkg/scheduler/nodeinfo/node_info_test.go similarity index 99% rename from pkg/scheduler/cache/node_info_test.go rename to pkg/scheduler/nodeinfo/node_info_test.go index 38c498a3d79..24744e1ac2e 100644 --- a/pkg/scheduler/cache/node_info_test.go +++ b/pkg/scheduler/nodeinfo/node_info_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cache +package nodeinfo import ( "fmt" diff --git a/pkg/scheduler/cache/util.go b/pkg/scheduler/nodeinfo/util.go similarity index 99% rename from pkg/scheduler/cache/util.go rename to pkg/scheduler/nodeinfo/util.go index 5a252b6402e..bb1fd0ce612 100644 --- a/pkg/scheduler/cache/util.go +++ b/pkg/scheduler/nodeinfo/util.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cache +package nodeinfo import ( "k8s.io/api/core/v1" diff --git a/pkg/scheduler/cache/util_test.go b/pkg/scheduler/nodeinfo/util_test.go similarity index 99% rename from pkg/scheduler/cache/util_test.go rename to pkg/scheduler/nodeinfo/util_test.go index 7b0b5f111c8..0e108773e2b 100644 --- a/pkg/scheduler/cache/util_test.go +++ b/pkg/scheduler/nodeinfo/util_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cache +package nodeinfo import ( "reflect" diff --git a/pkg/scheduler/plugins/BUILD b/pkg/scheduler/plugins/BUILD new file mode 100644 index 00000000000..8de218f021c --- /dev/null +++ b/pkg/scheduler/plugins/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["registrar.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/plugins", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/plugins/v1alpha1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/scheduler/plugins/examples:all-srcs", + "//pkg/scheduler/plugins/v1alpha1:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/scheduler/plugins/examples/BUILD 
b/pkg/scheduler/plugins/examples/BUILD new file mode 100644 index 00000000000..bc26941c8eb --- /dev/null +++ b/pkg/scheduler/plugins/examples/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "multipoint.go", + "prebind.go", + "stateful.go", + ], + importpath = "k8s.io/kubernetes/pkg/scheduler/plugins/examples", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/plugins/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/scheduler/plugins/examples/multipoint.go b/pkg/scheduler/plugins/examples/multipoint.go new file mode 100644 index 00000000000..3b82f219802 --- /dev/null +++ b/pkg/scheduler/plugins/examples/multipoint.go @@ -0,0 +1,62 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package examples + +import ( + "fmt" + + "k8s.io/api/core/v1" + plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1" +) + +// MultipointCommunicatingPlugin is an example of a plugin that implements two +// extension points. It communicates through pluginContext with another function. 
+type MultipointCommunicatingPlugin struct{} + +var _ = plugins.ReservePlugin(MultipointCommunicatingPlugin{}) + +// Name returns name of the plugin. It is used in logs, etc. +func (mc MultipointCommunicatingPlugin) Name() string { + return "multipoint-communicating-plugin" +} + +// Reserve is the functions invoked by the framework at "reserve" extension point. +func (mc MultipointCommunicatingPlugin) Reserve(ps plugins.PluginSet, pod *v1.Pod, nodeName string) error { + if pod == nil { + return fmt.Errorf("pod cannot be nil") + } + if pod.Name == "my-test-pod" { + ps.Data().Ctx.SyncWrite(plugins.ContextKey(pod.Name), "never bind") + } + return nil +} + +// Prebind is the functions invoked by the framework at "prebind" extension point. +func (mc MultipointCommunicatingPlugin) Prebind(ps plugins.PluginSet, pod *v1.Pod, nodeName string) (bool, error) { + if pod == nil { + return false, fmt.Errorf("pod cannot be nil") + } + if v, e := ps.Data().Ctx.SyncRead(plugins.ContextKey(pod.Name)); e == nil && v == "never bind" { + return false, nil + } + return true, nil +} + +// NewMultipointCommunicatingPlugin initializes a new plugin and returns it. +func NewMultipointCommunicatingPlugin() *MultipointCommunicatingPlugin { + return &MultipointCommunicatingPlugin{} +} diff --git a/pkg/scheduler/plugins/examples/prebind.go b/pkg/scheduler/plugins/examples/prebind.go new file mode 100644 index 00000000000..13e71eb0bd3 --- /dev/null +++ b/pkg/scheduler/plugins/examples/prebind.go @@ -0,0 +1,48 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package examples + +import ( + "fmt" + + "k8s.io/api/core/v1" + plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1" +) + +// StatelessPrebindExample is an example of a simple plugin that has no state +// and implements only one hook for prebind. +type StatelessPrebindExample struct{} + +var _ = plugins.PrebindPlugin(StatelessPrebindExample{}) + +// Name returns name of the plugin. It is used in logs, etc. +func (sr StatelessPrebindExample) Name() string { + return "stateless-prebind-plugin-example" +} + +// Prebind is the functions invoked by the framework at "prebind" extension point. +func (sr StatelessPrebindExample) Prebind(ps plugins.PluginSet, pod *v1.Pod, nodeName string) (bool, error) { + if pod == nil { + return false, fmt.Errorf("pod cannot be nil") + } + return true, nil +} + +// NewStatelessPrebindExample initializes a new plugin and returns it. +func NewStatelessPrebindExample() *StatelessPrebindExample { + return &StatelessPrebindExample{} +} diff --git a/pkg/scheduler/plugins/examples/stateful.go b/pkg/scheduler/plugins/examples/stateful.go new file mode 100644 index 00000000000..2b8b210305e --- /dev/null +++ b/pkg/scheduler/plugins/examples/stateful.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package examples + +import ( + "fmt" + "k8s.io/klog" + + "k8s.io/api/core/v1" + plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1" +) + +// StatefulMultipointExample is an example plugin that is executed at multiple extension points. +// This plugin is stateful. It receives arguments at initialization (NewMultipointPlugin) +// and changes its state when it is executed. +type StatefulMultipointExample struct { + mpState map[int]string + numRuns int +} + +var _ = plugins.ReservePlugin(&StatefulMultipointExample{}) +var _ = plugins.PrebindPlugin(&StatefulMultipointExample{}) + +// Name returns name of the plugin. It is used in logs, etc. +func (mp *StatefulMultipointExample) Name() string { + return "multipoint-plugin-example" +} + +// Reserve is the functions invoked by the framework at "reserve" extension point. +func (mp *StatefulMultipointExample) Reserve(ps plugins.PluginSet, pod *v1.Pod, nodeName string) error { + mp.numRuns++ + return nil +} + +// Prebind is the functions invoked by the framework at "prebind" extension point. +func (mp *StatefulMultipointExample) Prebind(ps plugins.PluginSet, pod *v1.Pod, nodeName string) (bool, error) { + mp.numRuns++ + if pod == nil { + return false, fmt.Errorf("pod must not be nil") + } + return true, nil +} + +// NewStatefulMultipointExample initializes a new plugin and returns it. +func NewStatefulMultipointExample(initState ...interface{}) *StatefulMultipointExample { + if len(initState) == 0 { + klog.Error("StatefulMultipointExample needs exactly one argument for initialization") + return nil + } + mp := StatefulMultipointExample{ + mpState: initState[0].(map[int]string), + } + return &mp +} diff --git a/pkg/scheduler/plugins/registrar.go b/pkg/scheduler/plugins/registrar.go new file mode 100644 index 00000000000..4eab86ecc15 --- /dev/null +++ b/pkg/scheduler/plugins/registrar.go @@ -0,0 +1,77 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugins + +import ( + "k8s.io/kubernetes/pkg/scheduler/internal/cache" + plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1" +) + +// DefaultPluginSet is the default plugin registrar used by the default scheduler. +type DefaultPluginSet struct { + data *plugins.PluginData + reservePlugins []plugins.ReservePlugin + prebindPlugins []plugins.PrebindPlugin +} + +var _ = plugins.PluginSet(&DefaultPluginSet{}) + +// ReservePlugins returns a slice of default reserve plugins. +func (r *DefaultPluginSet) ReservePlugins() []plugins.ReservePlugin { + return r.reservePlugins +} + +// PrebindPlugins returns a slice of default prebind plugins. +func (r *DefaultPluginSet) PrebindPlugins() []plugins.PrebindPlugin { + return r.prebindPlugins +} + +// Data returns a pointer to PluginData. +func (r *DefaultPluginSet) Data() *plugins.PluginData { + return r.data +} + +// NewDefaultPluginSet initializes default plugin set and returns its pointer. +func NewDefaultPluginSet(ctx *plugins.PluginContext, schedulerCache *cache.Cache) *DefaultPluginSet { + defaultRegistrar := DefaultPluginSet{ + data: &plugins.PluginData{ + Ctx: ctx, + SchedulerCache: schedulerCache, + }, + } + defaultRegistrar.registerReservePlugins() + defaultRegistrar.registerPrebindPlugins() + return &defaultRegistrar +} + +func (r DefaultPluginSet) registerReservePlugins() { + r.reservePlugins = []plugins.ReservePlugin{ + // Init functions of all reserve plugins go here. 
They are called in the + // same order that they are registered. + // Example: + // examples.NewStatefulMultipointExample(map[int]string{1: "test1", 2: "test2"}), + } +} + +func (r DefaultPluginSet) registerPrebindPlugins() { + r.prebindPlugins = []plugins.PrebindPlugin{ + // Init functions of all prebind plugins go here. They are called in the + // same order that they are registered. + // Example: + // examples.NewStatelessPrebindExample(), + } +} diff --git a/vendor/golang.org/x/exp/inotify/BUILD b/pkg/scheduler/plugins/v1alpha1/BUILD similarity index 59% rename from vendor/golang.org/x/exp/inotify/BUILD rename to pkg/scheduler/plugins/v1alpha1/BUILD index e9eae189ec5..619bd4685a9 100644 --- a/vendor/golang.org/x/exp/inotify/BUILD +++ b/pkg/scheduler/plugins/v1alpha1/BUILD @@ -2,10 +2,16 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["inotify_linux.go"], - importmap = "k8s.io/kubernetes/vendor/golang.org/x/exp/inotify", - importpath = "golang.org/x/exp/inotify", + srcs = [ + "context.go", + "interface.go", + ], + importpath = "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1", visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/internal/cache:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + ], ) filegroup( diff --git a/pkg/scheduler/plugins/v1alpha1/context.go b/pkg/scheduler/plugins/v1alpha1/context.go new file mode 100644 index 00000000000..0631b5f0d8b --- /dev/null +++ b/pkg/scheduler/plugins/v1alpha1/context.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "errors" + "sync" +) + +const ( + // NotFound is the not found error message. + NotFound = "not found" +) + +// ContextData is a generic type for arbitrary data stored in PluginContext. +type ContextData interface{} + +// ContextKey is the type of keys stored in PluginContext. +type ContextKey string + +// PluginContext provides a mechanism for plugins to store and retrieve arbitrary data. +// ContextData stored by one plugin can be read, altered, or deleted by another plugin. +// PluginContext does not provide any data protection, as all plugins are assumed to be +// trusted. +type PluginContext struct { + Mx sync.RWMutex + storage map[ContextKey]ContextData +} + +// NewPluginContext initializes a new PluginContext and returns its pointer. +func NewPluginContext() *PluginContext { + return &PluginContext{ + storage: make(map[ContextKey]ContextData), + } +} + +// Read retrieves data with the given "key" from PluginContext. If the key is not +// present an error is returned. +func (c *PluginContext) Read(key ContextKey) (ContextData, error) { + if v, ok := c.storage[key]; ok { + return v, nil + } + return nil, errors.New(NotFound) +} + +// SyncRead is the thread safe version of Read(...). +func (c *PluginContext) SyncRead(key ContextKey) (ContextData, error) { + c.Mx.RLock() + defer c.Mx.RUnlock() + return c.Read(key) +} + +// Write stores the given "val" in PluginContext with the given "key". 
+func (c *PluginContext) Write(key ContextKey, val ContextData) { + c.storage[key] = val +} + +// SyncWrite is the thread safe version of Write(...). +func (c *PluginContext) SyncWrite(key ContextKey, val ContextData) { + c.Mx.Lock() + defer c.Mx.Unlock() + c.Write(key, val) +} + +// Delete deletes data with the given key from PluginContext. +func (c *PluginContext) Delete(key ContextKey) { + delete(c.storage, key) +} + +// SyncDelete is the thread safe version of Write(...). +func (c *PluginContext) SyncDelete(key ContextKey) { + c.Mx.Lock() + defer c.Mx.Unlock() + c.Delete(key) +} + +// Reset removes all the information in the PluginContext. +func (c *PluginContext) Reset() { + c.storage = make(map[ContextKey]ContextData) +} diff --git a/pkg/scheduler/plugins/v1alpha1/interface.go b/pkg/scheduler/plugins/v1alpha1/interface.go new file mode 100644 index 00000000000..0d0c90b43e6 --- /dev/null +++ b/pkg/scheduler/plugins/v1alpha1/interface.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file defines the scheduling framework plugin interfaces. + +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" +) + +// PluginData carries information that plugins may need. +type PluginData struct { + Ctx *PluginContext + SchedulerCache *cache.Cache + // We may want to add the scheduling queue here too. 
+} + +// Plugin is the parent type for all the scheduling framework plugins. +type Plugin interface { + Name() string +} + +// ReservePlugin is an interface for Reserve plugins. These plugins are called +// at the reservation point, AKA "assume". These are meant to updated the state +// of the plugin. They do not return any value (other than error). +type ReservePlugin interface { + Plugin + // Reserve is called by the scheduling framework when the scheduler cache is + // updated. + Reserve(ps PluginSet, p *v1.Pod, nodeName string) error +} + +// PrebindPlugin is an interface that must be implemented by "prebind" plugins. +// These plugins are called before a pod being scheduled +type PrebindPlugin interface { + Plugin + // Prebind is called before binding a pod. All prebind plugins must return + // or the pod will not be sent for binding. + Prebind(ps PluginSet, p *v1.Pod, nodeName string) (bool, error) +} + +// PluginSet registers plugins used by the scheduling framework. +// The plugins registered are called at specified points in an scheduling cycle. +type PluginSet interface { + Data() *PluginData + ReservePlugins() []ReservePlugin + PrebindPlugins() []PrebindPlugin +} diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 05aa16b6ab5..c2c60508b65 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -17,11 +17,14 @@ limitations under the License. package scheduler import ( + "errors" "fmt" "io/ioutil" "os" "time" + "k8s.io/klog" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -44,13 +47,13 @@ import ( schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/util" - - "k8s.io/klog" ) const ( // BindTimeoutSeconds defines the default bind timeout BindTimeoutSeconds = 100 + // SchedulerError is the reason recorded for events when an error occurs during scheduling a pod. 
+ SchedulerError = "SchedulerError" ) // Scheduler watches for new unscheduled pods. It attempts to find @@ -286,19 +289,26 @@ func (sched *Scheduler) Config() *factory.Config { return sched.config } +// recordFailedSchedulingEvent records an event for the pod that indicates the +// pod has failed to schedule. +// NOTE: This function modifies "pod". "pod" should be copied before being passed. +func (sched *Scheduler) recordSchedulingFailure(pod *v1.Pod, err error, reason string, message string) { + sched.config.Error(pod, err) + sched.config.Recorder.Event(pod, v1.EventTypeWarning, "FailedScheduling", message) + sched.config.PodConditionUpdater.Update(pod, &v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + Reason: reason, + Message: err.Error(), + }) +} + // schedule implements the scheduling algorithm and returns the suggested host. func (sched *Scheduler) schedule(pod *v1.Pod) (string, error) { host, err := sched.config.Algorithm.Schedule(pod, sched.config.NodeLister) if err != nil { pod = pod.DeepCopy() - sched.config.Error(pod, err) - sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "%v", err) - sched.config.PodConditionUpdater.Update(pod, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: v1.PodReasonUnschedulable, - Message: err.Error(), - }) + sched.recordSchedulingFailure(pod, err, v1.PodReasonUnschedulable, err.Error()) return "", err } return host, err @@ -308,11 +318,6 @@ func (sched *Scheduler) schedule(pod *v1.Pod) (string, error) { // If it succeeds, it adds the name of the node where preemption has happened to the pod annotations. // It returns the node name and an error if any. func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, error) { - if !util.PodPriorityEnabled() || sched.config.DisablePreemption { - klog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." 
+ - " No preemption is performed.") - return "", nil - } preemptor, err := sched.config.PodPreemptor.GetUpdatedPod(preemptor) if err != nil { klog.Errorf("Error getting the updated preemptor pod object: %v", err) @@ -320,7 +325,6 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e } node, victims, nominatedPodsToClear, err := sched.config.Algorithm.Preempt(preemptor, sched.config.NodeLister, scheduleErr) - metrics.PreemptionVictims.Set(float64(len(victims))) if err != nil { klog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name) return "", err @@ -340,6 +344,7 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e } sched.config.Recorder.Eventf(victim, v1.EventTypeNormal, "Preempted", "by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName) } + metrics.PreemptionVictims.Set(float64(len(victims))) } // Clearing nominated pods should happen outside of "if node != nil". Node could // be nil when a pod with nominated node name is eligible to preempt again, @@ -362,14 +367,8 @@ func (sched *Scheduler) assumeVolumes(assumed *v1.Pod, host string) (allBound bo if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { allBound, err = sched.config.VolumeBinder.Binder.AssumePodVolumes(assumed, host) if err != nil { - sched.config.Error(assumed, err) - sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "AssumePodVolumes failed: %v", err) - sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: "SchedulerError", - Message: err.Error(), - }) + sched.recordSchedulingFailure(assumed, err, SchedulerError, + fmt.Sprintf("AssumePodVolumes failed: %v", err)) } // Invalidate ecache because assumed volumes could have affected the cached // pvs for other pods @@ -387,9 +386,6 @@ func (sched *Scheduler) assumeVolumes(assumed *v1.Pod, host string) (allBound 
bo // If binding errors, times out or gets undone, then an error will be returned to // retry scheduling. func (sched *Scheduler) bindVolumes(assumed *v1.Pod) error { - var reason string - var eventType string - klog.V(5).Infof("Trying to bind volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name) err := sched.config.VolumeBinder.Binder.BindPodVolumes(assumed) if err != nil { @@ -400,15 +396,11 @@ func (sched *Scheduler) bindVolumes(assumed *v1.Pod) error { klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr) } - reason = "VolumeBindingFailed" - eventType = v1.EventTypeWarning - sched.config.Error(assumed, err) - sched.config.Recorder.Eventf(assumed, eventType, "FailedScheduling", "%v", err) - sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: reason, - }) + // Volumes may be bound by PV controller asynchronously, we must clear + // stale pod binding cache. + sched.config.VolumeBinder.DeletePodBindings(assumed) + + sched.recordSchedulingFailure(assumed, err, "VolumeBindingFailed", err.Error()) return err } @@ -437,14 +429,8 @@ func (sched *Scheduler) assume(assumed *v1.Pod, host string) error { // This relies on the fact that Error will check if the pod has been bound // to a node and if so will not add it back to the unscheduled pods queue // (otherwise this would cause an infinite loop). 
- sched.config.Error(assumed, err) - sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "AssumePod failed: %v", err) - sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: "SchedulerError", - Message: err.Error(), - }) + sched.recordSchedulingFailure(assumed, err, SchedulerError, + fmt.Sprintf("AssumePod failed: %v", err)) return err } // if "assumed" is a nominated pod, we should remove it from internal cache @@ -476,13 +462,8 @@ func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error { if err := sched.config.SchedulerCache.ForgetPod(assumed); err != nil { klog.Errorf("scheduler cache ForgetPod failed: %v", err) } - sched.config.Error(assumed, err) - sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "Binding rejected: %v", err) - sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: "BindingRejected", - }) + sched.recordSchedulingFailure(assumed, err, SchedulerError, + fmt.Sprintf("Binding rejected: %v", err)) return err } @@ -494,6 +475,12 @@ func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error { // scheduleOne does the entire scheduling workflow for a single pod. It is serialized on the scheduling algorithm's host fitting. func (sched *Scheduler) scheduleOne() { + plugins := sched.config.PluginSet + // Remove all plugin context data at the beginning of a scheduling cycle. + if plugins.Data().Ctx != nil { + plugins.Data().Ctx.Reset() + } + pod := sched.config.NextPod() // pod could be nil when schedulerQueue is closed if pod == nil { @@ -516,11 +503,16 @@ func (sched *Scheduler) scheduleOne() { // will fit due to the preemption. It is also possible that a different pod will schedule // into the resources that were preempted, but this is harmless. 
if fitError, ok := err.(*core.FitError); ok { - preemptionStartTime := time.Now() - sched.preempt(pod, fitError) - metrics.PreemptionAttempts.Inc() - metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime)) - metrics.SchedulingLatency.WithLabelValues(metrics.PreemptionEvaluation).Observe(metrics.SinceInSeconds(preemptionStartTime)) + if !util.PodPriorityEnabled() || sched.config.DisablePreemption { + klog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." + + " No preemption is performed.") + } else { + preemptionStartTime := time.Now() + sched.preempt(pod, fitError) + metrics.PreemptionAttempts.Inc() + metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime)) + metrics.SchedulingLatency.WithLabelValues(metrics.PreemptionEvaluation).Observe(metrics.SinceInSeconds(preemptionStartTime)) + } // Pod did not fit anywhere, so it is counted as a failure. If preemption // succeeds, the pod should get counted as a success the next time we try to // schedule it. (hopefully) @@ -550,6 +542,16 @@ func (sched *Scheduler) scheduleOne() { return } + // Run "reserve" plugins. + for _, pl := range plugins.ReservePlugins() { + if err := pl.Reserve(plugins, assumedPod, suggestedHost); err != nil { + klog.Errorf("error while running %v reserve plugin for pod %v: %v", pl.Name(), assumedPod.Name, err) + sched.recordSchedulingFailure(assumedPod, err, SchedulerError, + fmt.Sprintf("reserve plugin %v failed", pl.Name())) + metrics.PodScheduleErrors.Inc() + return + } + } // assume modifies `assumedPod` by setting NodeName=suggestedHost err = sched.assume(assumedPod, suggestedHost) if err != nil { @@ -569,6 +571,30 @@ func (sched *Scheduler) scheduleOne() { } } + // Run "prebind" plugins. 
+ for _, pl := range plugins.PrebindPlugins() { + approved, err := pl.Prebind(plugins, assumedPod, suggestedHost) + if err != nil { + approved = false + klog.Errorf("error while running %v prebind plugin for pod %v: %v", pl.Name(), assumedPod.Name, err) + metrics.PodScheduleErrors.Inc() + } + if !approved { + sched.Cache().ForgetPod(assumedPod) + var reason string + if err == nil { + msg := fmt.Sprintf("prebind plugin %v rejected pod %v.", pl.Name(), assumedPod.Name) + klog.V(4).Infof(msg) + err = errors.New(msg) + reason = v1.PodReasonUnschedulable + } else { + reason = SchedulerError + } + sched.recordSchedulingFailure(assumedPod, err, reason, err.Error()) + return + } + } + err := sched.bind(assumedPod, &v1.Binding{ ObjectMeta: metav1.ObjectMeta{Namespace: assumedPod.Namespace, Name: assumedPod.Name, UID: assumedPod.UID}, Target: v1.ObjectReference{ diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 4dac1a7bd2d..51bae5d7ed0 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -46,11 +46,11 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/scheduler/api" kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" "k8s.io/kubernetes/pkg/scheduler/core" "k8s.io/kubernetes/pkg/scheduler/factory" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) @@ -136,11 +136,11 @@ func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v return pod } -func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo 
*schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return true, nil, nil } -func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (api.HostPriorityList, error) { +func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (api.HostPriorityList, error) { return []api.HostPriority{}, nil } @@ -211,7 +211,7 @@ func TestScheduler(t *testing.T) { name string injectBindError error sendPod *v1.Pod - algo algorithm.ScheduleAlgorithm + algo core.ScheduleAlgorithm expectErrorPod *v1.Pod expectForgetPod *v1.Pod expectAssumedPod *v1.Pod @@ -295,6 +295,7 @@ func TestScheduler(t *testing.T) { NextPod: func() *v1.Pod { return item.sendPod }, + PluginSet: &EmptyPluginSet{}, Recorder: eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"}), VolumeBinder: volumebinder.NewFakeVolumeBinder(&persistentvolume.FakeVolumeBinderConfig{AllBound: true}), }, @@ -424,8 +425,8 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) { } // We mimic the workflow of cache behavior when a pod is removed by user. - // Note: if the schedulercache timeout would be super short, the first pod would expire - // and would be removed itself (without any explicit actions on schedulercache). Even in that case, + // Note: if the schedulernodeinfo timeout would be super short, the first pod would expire + // and would be removed itself (without any explicit actions on schedulernodeinfo). Even in that case, // explicitly AddPod will as well correct the behavior. 
firstPod.Spec.NodeName = node.Name if err := scache.AddPod(firstPod); err != nil { @@ -643,6 +644,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulerintern algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{}, algorithm.EmptyPriorityMetadataProducer, + &EmptyPluginSet{}, []algorithm.SchedulerExtender{}, nil, informerFactory.Core().V1().PersistentVolumeClaims().Lister(), @@ -672,6 +674,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulerintern Recorder: &record.FakeRecorder{}, PodConditionUpdater: fakePodConditionUpdater{}, PodPreemptor: fakePodPreemptor{}, + PluginSet: &EmptyPluginSet{}, VolumeBinder: volumebinder.NewFakeVolumeBinder(&persistentvolume.FakeVolumeBinderConfig{AllBound: true}), }, } @@ -694,6 +697,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{}, algorithm.EmptyPriorityMetadataProducer, + &EmptyPluginSet{}, []algorithm.SchedulerExtender{}, nil, informerFactory.Core().V1().PersistentVolumeClaims().Lister(), @@ -727,6 +731,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc PodConditionUpdater: fakePodConditionUpdater{}, PodPreemptor: fakePodPreemptor{}, StopEverything: stop, + PluginSet: &EmptyPluginSet{}, VolumeBinder: volumebinder.NewFakeVolumeBinder(&persistentvolume.FakeVolumeBinderConfig{AllBound: true}), }, } diff --git a/pkg/scheduler/testing/BUILD b/pkg/scheduler/testing/BUILD index 91acca08c54..b64d47c514c 100644 --- a/pkg/scheduler/testing/BUILD +++ b/pkg/scheduler/testing/BUILD @@ -1,22 +1,12 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "fake_lister.go", - "util.go", - ], + srcs = ["fake_lister.go"], importpath = 
"k8s.io/kubernetes/pkg/scheduler/testing", deps = [ - "//pkg/api/legacyscheme:go_default_library", - "//pkg/apis/core:go_default_library", - "//pkg/apis/core/install:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", @@ -24,8 +14,6 @@ go_library( "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", ], ) @@ -42,13 +30,3 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) - -go_test( - name = "go_default_test", - srcs = ["util_test.go"], - embed = [":go_default_library"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - ], -) diff --git a/pkg/scheduler/testing/util.go b/pkg/scheduler/testing/util.go deleted file mode 100644 index 25921a2e058..00000000000 --- a/pkg/scheduler/testing/util.go +++ /dev/null @@ -1,173 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package testing - -import ( - "fmt" - "mime" - "os" - "reflect" - "strings" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/api/legacyscheme" - api "k8s.io/kubernetes/pkg/apis/core" - - // Init the core api installation - _ "k8s.io/kubernetes/pkg/apis/core/install" -) - -// TestGroup defines a api group for testing. -type TestGroup struct { - externalGroupVersion schema.GroupVersion - internalGroupVersion schema.GroupVersion - internalTypes map[string]reflect.Type - externalTypes map[string]reflect.Type -} - -var ( - // Groups defines a TestGroup map. - Groups = make(map[string]TestGroup) - // Test defines a TestGroup object. - Test TestGroup - - serializer runtime.SerializerInfo -) - -func init() { - if apiMediaType := os.Getenv("KUBE_TEST_API_TYPE"); len(apiMediaType) > 0 { - var ok bool - mediaType, _, err := mime.ParseMediaType(apiMediaType) - if err != nil { - panic(err) - } - serializer, ok = runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), mediaType) - if !ok { - panic(fmt.Sprintf("no serializer for %s", apiMediaType)) - } - } - - kubeTestAPI := os.Getenv("KUBE_TEST_API") - if len(kubeTestAPI) != 0 { - // priority is "first in list preferred", so this has to run in reverse order - testGroupVersions := strings.Split(kubeTestAPI, ",") - for i := len(testGroupVersions) - 1; i >= 0; i-- { - gvString := testGroupVersions[i] - groupVersion, err := schema.ParseGroupVersion(gvString) - if err != nil { - panic(fmt.Sprintf("Error parsing groupversion %v: %v", gvString, err)) - } - - internalGroupVersion := schema.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal} - Groups[groupVersion.Group] = TestGroup{ - externalGroupVersion: groupVersion, - internalGroupVersion: internalGroupVersion, - internalTypes: legacyscheme.Scheme.KnownTypes(internalGroupVersion), - externalTypes: legacyscheme.Scheme.KnownTypes(groupVersion), - } - } - } - - if _, ok := 
Groups[api.GroupName]; !ok { - externalGroupVersion := schema.GroupVersion{Group: api.GroupName, Version: "v1"} - Groups[api.GroupName] = TestGroup{ - externalGroupVersion: externalGroupVersion, - internalGroupVersion: api.SchemeGroupVersion, - internalTypes: legacyscheme.Scheme.KnownTypes(api.SchemeGroupVersion), - externalTypes: legacyscheme.Scheme.KnownTypes(externalGroupVersion), - } - } - - Test = Groups[api.GroupName] -} - -// Codec returns the codec for the API version to test against, as set by the -// KUBE_TEST_API_TYPE env var. -func (g TestGroup) Codec() runtime.Codec { - if serializer.Serializer == nil { - return legacyscheme.Codecs.LegacyCodec(g.externalGroupVersion) - } - return legacyscheme.Codecs.CodecForVersions(serializer.Serializer, legacyscheme.Codecs.UniversalDeserializer(), schema.GroupVersions{g.externalGroupVersion}, nil) -} - -// SelfLink returns a self link that will appear to be for the version Version(). -// 'resource' should be the resource path, e.g. "pods" for the Pod type. 'name' should be -// empty for lists. -func (g TestGroup) SelfLink(resource, name string) string { - if g.externalGroupVersion.Group == api.GroupName { - if name == "" { - return fmt.Sprintf("/api/%s/%s", g.externalGroupVersion.Version, resource) - } - return fmt.Sprintf("/api/%s/%s/%s", g.externalGroupVersion.Version, resource, name) - } - - // TODO: will need a /apis prefix once we have proper multi-group - // support - if name == "" { - return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource) - } - return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource, name) -} - -// ResourcePathWithPrefix returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. -// For ex, this is of the form: -// /api/v1/watch/namespaces/foo/pods/pod0 for v1. 
-func (g TestGroup) ResourcePathWithPrefix(prefix, resource, namespace, name string) string { - var path string - if g.externalGroupVersion.Group == api.GroupName { - path = "/api/" + g.externalGroupVersion.Version - } else { - // TODO: switch back once we have proper multiple group support - // path = "/apis/" + g.Group + "/" + Version(group...) - path = "/apis/" + g.externalGroupVersion.Group + "/" + g.externalGroupVersion.Version - } - - if prefix != "" { - path = path + "/" + prefix - } - if namespace != "" { - path = path + "/namespaces/" + namespace - } - // Resource names are lower case. - resource = strings.ToLower(resource) - if resource != "" { - path = path + "/" + resource - } - if name != "" { - path = path + "/" + name - } - return path -} - -// ResourcePath returns the appropriate path for the given resource, namespace and name. -// For example, this is of the form: -// /api/v1/namespaces/foo/pods/pod0 for v1. -func (g TestGroup) ResourcePath(resource, namespace, name string) string { - return g.ResourcePathWithPrefix("", resource, namespace, name) -} - -// SubResourcePath returns the appropriate path for the given resource, namespace, -// name and subresource. -func (g TestGroup) SubResourcePath(resource, namespace, name, sub string) string { - path := g.ResourcePathWithPrefix("", resource, namespace, name) - if sub != "" { - path = path + "/" + sub - } - - return path -} diff --git a/pkg/scheduler/testing/util_test.go b/pkg/scheduler/testing/util_test.go deleted file mode 100644 index e6545b00559..00000000000 --- a/pkg/scheduler/testing/util_test.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package testing - -import ( - "encoding/json" - "reflect" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -func TestResourcePathWithPrefix(t *testing.T) { - testCases := []struct { - prefix string - resource string - namespace string - name string - expected string - }{ - {"prefix", "resource", "mynamespace", "myresource", "/api/" + Test.externalGroupVersion.Version + "/prefix/namespaces/mynamespace/resource/myresource"}, - {"prefix", "resource", "", "myresource", "/api/" + Test.externalGroupVersion.Version + "/prefix/resource/myresource"}, - {"prefix", "resource", "mynamespace", "", "/api/" + Test.externalGroupVersion.Version + "/prefix/namespaces/mynamespace/resource"}, - {"prefix", "resource", "", "", "/api/" + Test.externalGroupVersion.Version + "/prefix/resource"}, - {"", "resource", "mynamespace", "myresource", "/api/" + Test.externalGroupVersion.Version + "/namespaces/mynamespace/resource/myresource"}, - } - for _, item := range testCases { - if actual := Test.ResourcePathWithPrefix(item.prefix, item.resource, item.namespace, item.name); actual != item.expected { - t.Errorf("Expected: %s, got: %s for prefix: %s, resource: %s, namespace: %s and name: %s", item.expected, actual, item.prefix, item.resource, item.namespace, item.name) - } - } - - TestGroup := Test - TestGroup.externalGroupVersion.Group = "TestGroup" - - testGroupCases := []struct { - prefix string - resource string - namespace string - name string - expected string - }{ - {"prefix", "resource", "mynamespace", "myresource", "/apis/" + 
TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/prefix/namespaces/mynamespace/resource/myresource"}, - {"prefix", "resource", "", "myresource", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/prefix/resource/myresource"}, - {"prefix", "resource", "mynamespace", "", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/prefix/namespaces/mynamespace/resource"}, - {"prefix", "resource", "", "", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/prefix/resource"}, - {"", "resource", "mynamespace", "myresource", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/namespaces/mynamespace/resource/myresource"}, - } - for _, item := range testGroupCases { - if actual := TestGroup.ResourcePathWithPrefix(item.prefix, item.resource, item.namespace, item.name); actual != item.expected { - t.Errorf("Expected: %s, got: %s for prefix: %s, resource: %s, namespace: %s and name: %s", item.expected, actual, item.prefix, item.resource, item.namespace, item.name) - } - } - -} - -func TestResourcePath(t *testing.T) { - testCases := []struct { - resource string - namespace string - name string - expected string - }{ - {"resource", "mynamespace", "myresource", "/api/" + Test.externalGroupVersion.Version + "/namespaces/mynamespace/resource/myresource"}, - {"resource", "", "myresource", "/api/" + Test.externalGroupVersion.Version + "/resource/myresource"}, - {"resource", "mynamespace", "", "/api/" + Test.externalGroupVersion.Version + "/namespaces/mynamespace/resource"}, - {"resource", "", "", "/api/" + Test.externalGroupVersion.Version + "/resource"}, - } - for _, item := range testCases { - if actual := Test.ResourcePath(item.resource, item.namespace, item.name); actual != item.expected { - t.Errorf("Expected: %s, got: %s for resource: %s, namespace: %s and name: 
%s", item.expected, actual, item.resource, item.namespace, item.name) - } - } - - TestGroup := Test - TestGroup.externalGroupVersion.Group = "TestGroup" - - testGroupCases := []struct { - resource string - namespace string - name string - expected string - }{ - {"resource", "mynamespace", "myresource", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/namespaces/mynamespace/resource/myresource"}, - {"resource", "", "myresource", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/resource/myresource"}, - {"resource", "mynamespace", "", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/namespaces/mynamespace/resource"}, - {"resource", "", "", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/resource"}, - } - for _, item := range testGroupCases { - if actual := TestGroup.ResourcePath(item.resource, item.namespace, item.name); actual != item.expected { - t.Errorf("Expected: %s, got: %s for resource: %s, namespace: %s and name: %s", item.expected, actual, item.resource, item.namespace, item.name) - } - } - -} - -func TestSubResourcePath(t *testing.T) { - testCases := []struct { - resource string - namespace string - name string - sub string - expected string - }{ - {"resource", "mynamespace", "myresource", "subresource", "/api/" + Test.externalGroupVersion.Version + "/namespaces/mynamespace/resource/myresource/subresource"}, - {"resource", "mynamespace", "myresource", "", "/api/" + Test.externalGroupVersion.Version + "/namespaces/mynamespace/resource/myresource"}, - } - for _, item := range testCases { - if actual := Test.SubResourcePath(item.resource, item.namespace, item.name, item.sub); actual != item.expected { - t.Errorf("Expected: %s, got: %s for resource: %s, namespace: %s, name: %s and sub: %s", item.expected, actual, item.resource, item.namespace, item.name, item.sub) 
- } - } - - TestGroup := Test - TestGroup.externalGroupVersion.Group = "TestGroup" - - testGroupCases := []struct { - resource string - namespace string - name string - sub string - expected string - }{ - {"resource", "mynamespace", "myresource", "subresource", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/namespaces/mynamespace/resource/myresource/subresource"}, - {"resource", "mynamespace", "myresource", "", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/namespaces/mynamespace/resource/myresource"}, - } - for _, item := range testGroupCases { - if actual := TestGroup.SubResourcePath(item.resource, item.namespace, item.name, item.sub); actual != item.expected { - t.Errorf("Expected: %s, got: %s for resource: %s, namespace: %s, name: %s and sub: %s", item.expected, actual, item.resource, item.namespace, item.name, item.sub) - } - } - -} - -func TestSelfLink(t *testing.T) { - testCases := []struct { - resource string - name string - expected string - }{ - {"resource", "name", "/api/" + Test.externalGroupVersion.Version + "/resource/name"}, - {"resource", "", "/api/" + Test.externalGroupVersion.Version + "/resource"}, - } - for _, item := range testCases { - if actual := Test.SelfLink(item.resource, item.name); actual != item.expected { - t.Errorf("Expected: %s, got: %s for resource: %s and name: %s", item.expected, actual, item.resource, item.name) - } - } - - TestGroup := Test - TestGroup.externalGroupVersion.Group = "TestGroup" - - testGroupCases := []struct { - resource string - name string - expected string - }{ - {"resource", "name", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/resource/name"}, - {"resource", "", "/apis/" + TestGroup.externalGroupVersion.Group + "/" + TestGroup.externalGroupVersion.Version + "/resource"}, - } - for _, item := range testGroupCases { - if actual := 
TestGroup.SelfLink(item.resource, item.name); actual != item.expected { - t.Errorf("Expected: %s, got: %s for resource: %s and name: %s", item.expected, actual, item.resource, item.name) - } - } -} - -var status = &metav1.Status{ - Status: metav1.StatusFailure, - Code: 200, - Reason: metav1.StatusReasonUnknown, - Message: "", -} - -func TestV1EncodeDecodeStatus(t *testing.T) { - v1Codec := Test.Codec() - - encoded, err := runtime.Encode(v1Codec, status) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - typeMeta := metav1.TypeMeta{} - if err := json.Unmarshal(encoded, &typeMeta); err != nil { - t.Errorf("unexpected error: %v", err) - } - if typeMeta.Kind != "Status" { - t.Errorf("Kind is not set to \"Status\". Got %v", string(encoded)) - } - if typeMeta.APIVersion != "v1" { - t.Errorf("APIVersion is not set to \"v1\". Got %v", string(encoded)) - } - decoded, err := runtime.Decode(v1Codec, encoded) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(status, decoded) { - t.Errorf("expected: %#v, got: %#v", status, decoded) - } -} diff --git a/pkg/scheduler/testutil.go b/pkg/scheduler/testutil.go index 0c101a54573..b495fdefa16 100644 --- a/pkg/scheduler/testutil.go +++ b/pkg/scheduler/testutil.go @@ -27,6 +27,7 @@ import ( schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/pkg/scheduler/factory" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" + plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -89,3 +90,26 @@ func (fc *FakeConfigurator) CreateFromConfig(policy schedulerapi.Policy) (*facto func (fc *FakeConfigurator) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*factory.Config, error) { return fc.Config, nil } + +// EmptyPluginSet is the default plugin registrar used by the default scheduler. 
+type EmptyPluginSet struct{} + +var _ = plugins.PluginSet(EmptyPluginSet{}) + +// ReservePlugins returns a slice of default reserve plugins. +func (r EmptyPluginSet) ReservePlugins() []plugins.ReservePlugin { + return []plugins.ReservePlugin{} +} + +// PrebindPlugins returns a slice of default prebind plugins. +func (r EmptyPluginSet) PrebindPlugins() []plugins.PrebindPlugin { + return []plugins.PrebindPlugin{} +} + +// Data returns a pointer to PluginData. +func (r EmptyPluginSet) Data() *plugins.PluginData { + return &plugins.PluginData{ + Ctx: nil, + SchedulerCache: nil, + } +} diff --git a/pkg/scheduler/util/BUILD b/pkg/scheduler/util/BUILD index 810d2c5cb0a..53649692f2a 100644 --- a/pkg/scheduler/util/BUILD +++ b/pkg/scheduler/util/BUILD @@ -10,6 +10,7 @@ go_test( name = "go_default_test", srcs = [ "backoff_utils_test.go", + "heap_test.go", "utils_test.go", ], embed = [":go_default_library"], @@ -25,6 +26,8 @@ go_library( name = "go_default_library", srcs = [ "backoff_utils.go", + "clock.go", + "heap.go", "utils.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/util", @@ -34,6 +37,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/scheduler/util/backoff_utils.go b/pkg/scheduler/util/backoff_utils.go index 506cd1270ac..618f93772f9 100644 --- a/pkg/scheduler/util/backoff_utils.go +++ b/pkg/scheduler/util/backoff_utils.go @@ -37,10 +37,11 @@ func (realClock) Now() time.Time { return time.Now() } -// BackoffEntry is single threaded. in particular, it only allows a single action to be waiting on backoff at a time. -// It is expected that all users will only use the public TryWait(...) method +// backoffEntry is single threaded. 
in particular, it only allows a single action to be waiting on backoff at a time. // It is also not safe to copy this object. -type BackoffEntry struct { +type backoffEntry struct { + initialized bool + podName ktypes.NamespacedName backoff time.Duration lastUpdate time.Time reqInFlight int32 @@ -48,45 +49,41 @@ type BackoffEntry struct { // tryLock attempts to acquire a lock via atomic compare and swap. // returns true if the lock was acquired, false otherwise -func (b *BackoffEntry) tryLock() bool { +func (b *backoffEntry) tryLock() bool { return atomic.CompareAndSwapInt32(&b.reqInFlight, 0, 1) } // unlock returns the lock. panics if the lock isn't held -func (b *BackoffEntry) unlock() { +func (b *backoffEntry) unlock() { if !atomic.CompareAndSwapInt32(&b.reqInFlight, 1, 0) { panic(fmt.Sprintf("unexpected state on unlocking: %+v", b)) } } -// TryWait tries to acquire the backoff lock, maxDuration is the maximum allowed period to wait for. -func (b *BackoffEntry) TryWait(maxDuration time.Duration) bool { - if !b.tryLock() { - return false - } - defer b.unlock() - b.wait(maxDuration) - return true +// backoffTime returns the Time when a backoffEntry completes backoff +func (b *backoffEntry) backoffTime() time.Time { + return b.lastUpdate.Add(b.backoff) } -func (b *BackoffEntry) getBackoff(maxDuration time.Duration) time.Duration { - duration := b.backoff - newDuration := time.Duration(duration) * 2 +// getBackoff returns the duration until this entry completes backoff +func (b *backoffEntry) getBackoff(maxDuration time.Duration) time.Duration { + if !b.initialized { + b.initialized = true + return b.backoff + } + newDuration := b.backoff * 2 if newDuration > maxDuration { newDuration = maxDuration } b.backoff = newDuration - klog.V(4).Infof("Backing off %s", duration.String()) - return duration -} - -func (b *BackoffEntry) wait(maxDuration time.Duration) { - time.Sleep(b.getBackoff(maxDuration)) + klog.V(4).Infof("Backing off %s", newDuration.String()) + return 
newDuration } // PodBackoff is used to restart a pod with back-off delay. type PodBackoff struct { - perPodBackoff map[ktypes.NamespacedName]*BackoffEntry + // expiryQ stores backoffEntry ordered by lastUpdate until they reach maxDuration and are GC'd + expiryQ *Heap lock sync.Mutex clock clock defaultDuration time.Duration @@ -111,24 +108,58 @@ func CreatePodBackoff(defaultDuration, maxDuration time.Duration) *PodBackoff { // CreatePodBackoffWithClock creates a pod back-off object by default duration, max duration and clock. func CreatePodBackoffWithClock(defaultDuration, maxDuration time.Duration, clock clock) *PodBackoff { return &PodBackoff{ - perPodBackoff: map[ktypes.NamespacedName]*BackoffEntry{}, + expiryQ: NewHeap(backoffEntryKeyFunc, backoffEntryCompareUpdate), clock: clock, defaultDuration: defaultDuration, maxDuration: maxDuration, } } -// GetEntry returns a back-off entry by Pod ID. -func (p *PodBackoff) GetEntry(podID ktypes.NamespacedName) *BackoffEntry { +// getEntry returns the backoffEntry for a given podID +func (p *PodBackoff) getEntry(podID ktypes.NamespacedName) *backoffEntry { + entry, exists, _ := p.expiryQ.GetByKey(podID.String()) + var be *backoffEntry + if !exists { + be = &backoffEntry{ + initialized: false, + podName: podID, + backoff: p.defaultDuration, + } + p.expiryQ.Update(be) + } else { + be = entry.(*backoffEntry) + } + return be +} + +// BackoffPod updates the backoff for a podId and returns the duration until backoff completion +func (p *PodBackoff) BackoffPod(podID ktypes.NamespacedName) time.Duration { p.lock.Lock() defer p.lock.Unlock() - entry, ok := p.perPodBackoff[podID] - if !ok { - entry = &BackoffEntry{backoff: p.defaultDuration} - p.perPodBackoff[podID] = entry - } + entry := p.getEntry(podID) entry.lastUpdate = p.clock.Now() - return entry + p.expiryQ.Update(entry) + return entry.getBackoff(p.maxDuration) +} + +// TryBackoffAndWait tries to acquire the backoff lock +func (p *PodBackoff) TryBackoffAndWait(podID 
ktypes.NamespacedName, stop <-chan struct{}) bool { + p.lock.Lock() + entry := p.getEntry(podID) + + if !entry.tryLock() { + p.lock.Unlock() + return false + } + defer entry.unlock() + duration := entry.getBackoff(p.maxDuration) + p.lock.Unlock() + select { + case <-time.After(duration): + return true + case <-stop: + return false + } } // Gc execute garbage collection on the pod back-off. @@ -136,9 +167,54 @@ func (p *PodBackoff) Gc() { p.lock.Lock() defer p.lock.Unlock() now := p.clock.Now() - for podID, entry := range p.perPodBackoff { - if now.Sub(entry.lastUpdate) > p.maxDuration { - delete(p.perPodBackoff, podID) + var be *backoffEntry + for { + entry := p.expiryQ.Peek() + if entry == nil { + break + } + be = entry.(*backoffEntry) + if now.Sub(be.lastUpdate) > p.maxDuration { + p.expiryQ.Pop() + } else { + break } } } + +// GetBackoffTime returns the time that podID completes backoff +func (p *PodBackoff) GetBackoffTime(podID ktypes.NamespacedName) (time.Time, bool) { + p.lock.Lock() + defer p.lock.Unlock() + rawBe, exists, _ := p.expiryQ.GetByKey(podID.String()) + if !exists { + return time.Time{}, false + } + be := rawBe.(*backoffEntry) + return be.lastUpdate.Add(be.backoff), true +} + +// ClearPodBackoff removes all tracking information for podID (clears expiry) +func (p *PodBackoff) ClearPodBackoff(podID ktypes.NamespacedName) bool { + p.lock.Lock() + defer p.lock.Unlock() + entry, exists, _ := p.expiryQ.GetByKey(podID.String()) + if exists { + err := p.expiryQ.Delete(entry) + return err == nil + } + return false +} + +// backoffEntryKeyFunc is the keying function used for mapping a backoffEntry to string for heap +func backoffEntryKeyFunc(b interface{}) (string, error) { + be := b.(*backoffEntry) + return be.podName.String(), nil +} + +// backoffEntryCompareUpdate returns true when b1's backoff time is before b2's +func backoffEntryCompareUpdate(b1, b2 interface{}) bool { + be1 := b1.(*backoffEntry) + be2 := b2.(*backoffEntry) + return 
be1.lastUpdate.Before(be2.lastUpdate) +} diff --git a/pkg/scheduler/util/backoff_utils_test.go b/pkg/scheduler/util/backoff_utils_test.go index 8f61b637e7d..b99c9498f58 100644 --- a/pkg/scheduler/util/backoff_utils_test.go +++ b/pkg/scheduler/util/backoff_utils_test.go @@ -31,7 +31,7 @@ func (f *fakeClock) Now() time.Time { return f.t } -func TestBackoff(t *testing.T) { +func TestBackoffPod(t *testing.T) { clock := fakeClock{} backoff := CreatePodBackoffWithClock(1*time.Second, 60*time.Second, &clock) tests := []struct { @@ -64,23 +64,75 @@ func TestBackoff(t *testing.T) { } for _, test := range tests { - duration := backoff.GetEntry(test.podID).getBackoff(backoff.maxDuration) + duration := backoff.BackoffPod(test.podID) if duration != test.expectedDuration { - t.Errorf("expected: %s, got %s for %s", test.expectedDuration.String(), duration.String(), test.podID) + t.Errorf("expected: %s, got %s for pod %s", test.expectedDuration.String(), duration.String(), test.podID) + } + if boTime, _ := backoff.GetBackoffTime(test.podID); boTime != clock.Now().Add(test.expectedDuration) { + t.Errorf("expected GetBackoffTime %s, got %s for pod %s", test.expectedDuration.String(), boTime.String(), test.podID) } clock.t = clock.t.Add(test.advanceClock) backoff.Gc() } fooID := ktypes.NamespacedName{Namespace: "default", Name: "foo"} - backoff.perPodBackoff[fooID].backoff = 60 * time.Second - duration := backoff.GetEntry(fooID).getBackoff(backoff.maxDuration) + be := backoff.getEntry(fooID) + be.backoff = 60 * time.Second + duration := backoff.BackoffPod(fooID) if duration != 60*time.Second { t.Errorf("expected: 60, got %s", duration.String()) } // Verify that we split on namespaces correctly, same name, different namespace fooID.Namespace = "other" - duration = backoff.GetEntry(fooID).getBackoff(backoff.maxDuration) + duration = backoff.BackoffPod(fooID) if duration != 1*time.Second { t.Errorf("expected: 1, got %s", duration.String()) } } + +func TestClearPodBackoff(t *testing.T) { 
+ clock := fakeClock{} + backoff := CreatePodBackoffWithClock(1*time.Second, 60*time.Second, &clock) + + if backoff.ClearPodBackoff(ktypes.NamespacedName{Namespace: "ns", Name: "nonexist"}) { + t.Error("Expected ClearPodBackoff failure for unknown pod, got success.") + } + + podID := ktypes.NamespacedName{Namespace: "ns", Name: "foo"} + if dur := backoff.BackoffPod(podID); dur != 1*time.Second { + t.Errorf("Expected backoff of 1s for pod %s, got %s", podID, dur.String()) + } + + if !backoff.ClearPodBackoff(podID) { + t.Errorf("Failed to clear backoff for pod %v", podID) + } + + expectBoTime := clock.Now() + if boTime, _ := backoff.GetBackoffTime(podID); boTime != expectBoTime { + t.Errorf("Expected backoff time for pod %s of %s, got %s", podID, expectBoTime, boTime) + } +} + +func TestTryBackoffAndWait(t *testing.T) { + clock := fakeClock{} + backoff := CreatePodBackoffWithClock(1*time.Second, 60*time.Second, &clock) + + stopCh := make(chan struct{}) + podID := ktypes.NamespacedName{Namespace: "ns", Name: "pod"} + if !backoff.TryBackoffAndWait(podID, stopCh) { + t.Error("Expected TryBackoffAndWait success for new pod, got failure.") + } + + be := backoff.getEntry(podID) + if !be.tryLock() { + t.Error("Failed to acquire lock for backoffentry") + } + + if backoff.TryBackoffAndWait(podID, stopCh) { + t.Error("Expected TryBackoffAndWait failure with lock acquired, got success.") + } + + close(stopCh) + if backoff.TryBackoffAndWait(podID, stopCh) { + t.Error("Expected TryBackoffAndWait failure with closed stopCh, got success.") + } +} diff --git a/pkg/controller/daemon/util/main_test.go b/pkg/scheduler/util/clock.go similarity index 68% rename from pkg/controller/daemon/util/main_test.go rename to pkg/scheduler/util/clock.go index 6af02d0a11d..e17c759dbac 100644 --- a/pkg/controller/daemon/util/main_test.go +++ b/pkg/scheduler/util/clock.go @@ -17,13 +17,18 @@ limitations under the License. 
package util import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" + "time" ) -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) +// Clock provides an interface for getting the current time +type Clock interface { + Now() time.Time +} + +// RealClock implements a clock using time +type RealClock struct{} + +// Now returns the current time with time.Now +func (RealClock) Now() time.Time { + return time.Now() } diff --git a/pkg/scheduler/util/heap.go b/pkg/scheduler/util/heap.go new file mode 100644 index 00000000000..0f15652c654 --- /dev/null +++ b/pkg/scheduler/util/heap.go @@ -0,0 +1,236 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Below is the implementation of a heap. The logic is pretty much the same +// as cache.heap, however, this heap does not perform synchronization. It leaves +// synchronization to the SchedulingQueue. + +package util + +import ( + "container/heap" + "fmt" + + "k8s.io/client-go/tools/cache" +) + +// KeyFunc is a function type to get the key from an object. +type KeyFunc func(obj interface{}) (string, error) + +type heapItem struct { + obj interface{} // The object which is stored in the heap. + index int // The index of the object's key in the Heap.queue. 
+} + +type itemKeyValue struct { + key string + obj interface{} +} + +// heapData is an internal struct that implements the standard heap interface +// and keeps the data stored in the heap. +type heapData struct { + // items is a map from key of the objects to the objects and their index. + // We depend on the property that items in the map are in the queue and vice versa. + items map[string]*heapItem + // queue implements a heap data structure and keeps the order of elements + // according to the heap invariant. The queue keeps the keys of objects stored + // in "items". + queue []string + + // keyFunc is used to make the key used for queued item insertion and retrieval, and + // should be deterministic. + keyFunc KeyFunc + // lessFunc is used to compare two objects in the heap. + lessFunc LessFunc +} + +var ( + _ = heap.Interface(&heapData{}) // heapData is a standard heap +) + +// Less compares two objects and returns true if the first one should go +// in front of the second one in the heap. +func (h *heapData) Less(i, j int) bool { + if i > len(h.queue) || j > len(h.queue) { + return false + } + itemi, ok := h.items[h.queue[i]] + if !ok { + return false + } + itemj, ok := h.items[h.queue[j]] + if !ok { + return false + } + return h.lessFunc(itemi.obj, itemj.obj) +} + +// Len returns the number of items in the Heap. +func (h *heapData) Len() int { return len(h.queue) } + +// Swap implements swapping of two elements in the heap. This is a part of standard +// heap interface and should never be called directly. +func (h *heapData) Swap(i, j int) { + h.queue[i], h.queue[j] = h.queue[j], h.queue[i] + item := h.items[h.queue[i]] + item.index = i + item = h.items[h.queue[j]] + item.index = j +} + +// Push is supposed to be called by heap.Push only. 
+func (h *heapData) Push(kv interface{}) { + keyValue := kv.(*itemKeyValue) + n := len(h.queue) + h.items[keyValue.key] = &heapItem{keyValue.obj, n} + h.queue = append(h.queue, keyValue.key) +} + +// Pop is supposed to be called by heap.Pop only. +func (h *heapData) Pop() interface{} { + key := h.queue[len(h.queue)-1] + h.queue = h.queue[0 : len(h.queue)-1] + item, ok := h.items[key] + if !ok { + // This is an error + return nil + } + delete(h.items, key) + return item.obj +} + +// Peek is supposed to be called by heap.Peek only. +func (h *heapData) Peek() interface{} { + if len(h.queue) > 0 { + return h.items[h.queue[0]].obj + } + return nil +} + +// Heap is a producer/consumer queue that implements a heap data structure. +// It can be used to implement priority queues and similar data structures. +type Heap struct { + // data stores objects and has a queue that keeps their ordering according + // to the heap invariant. + data *heapData +} + +// Add inserts an item, and puts it in the queue. The item is updated if it +// already exists. +func (h *Heap) Add(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return cache.KeyError{Obj: obj, Err: err} + } + if _, exists := h.data.items[key]; exists { + h.data.items[key].obj = obj + heap.Fix(h.data, h.data.items[key].index) + } else { + heap.Push(h.data, &itemKeyValue{key, obj}) + } + return nil +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If an item with +// the key is present in the map, no changes are made to the item. +func (h *Heap) AddIfNotPresent(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return cache.KeyError{Obj: obj, Err: err} + } + if _, exists := h.data.items[key]; !exists { + heap.Push(h.data, &itemKeyValue{key, obj}) + } + return nil +} + +// Update is the same as Add in this implementation. When the item does not +// exist, it is added. 
+func (h *Heap) Update(obj interface{}) error { + return h.Add(obj) +} + +// Delete removes an item. +func (h *Heap) Delete(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return cache.KeyError{Obj: obj, Err: err} + } + if item, ok := h.data.items[key]; ok { + heap.Remove(h.data, item.index) + return nil + } + return fmt.Errorf("object not found") +} + +// Peek returns the head of the heap without removing it. +func (h *Heap) Peek() interface{} { + return h.data.Peek() +} + +// Pop returns the head of the heap and removes it. +func (h *Heap) Pop() (interface{}, error) { + obj := heap.Pop(h.data) + if obj != nil { + return obj, nil + } + return nil, fmt.Errorf("object was removed from heap data") +} + +// Get returns the requested item, or sets exists=false. +func (h *Heap) Get(obj interface{}) (interface{}, bool, error) { + key, err := h.data.keyFunc(obj) + if err != nil { + return nil, false, cache.KeyError{Obj: obj, Err: err} + } + return h.GetByKey(key) +} + +// GetByKey returns the requested item, or sets exists=false. +func (h *Heap) GetByKey(key string) (interface{}, bool, error) { + item, exists := h.data.items[key] + if !exists { + return nil, false, nil + } + return item.obj, true, nil +} + +// List returns a list of all the items. +func (h *Heap) List() []interface{} { + list := make([]interface{}, 0, len(h.data.items)) + for _, item := range h.data.items { + list = append(list, item.obj) + } + return list +} + +// Len returns the number of items in the heap. +func (h *Heap) Len() int { + return len(h.data.queue) +} + +// NewHeap returns a Heap which can be used to queue up items to process. 
+func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap { + return &Heap{ + data: &heapData{ + items: map[string]*heapItem{}, + queue: []string{}, + keyFunc: keyFn, + lessFunc: lessFn, + }, + } +} diff --git a/pkg/scheduler/util/heap_test.go b/pkg/scheduler/util/heap_test.go new file mode 100644 index 00000000000..62812ec4c91 --- /dev/null +++ b/pkg/scheduler/util/heap_test.go @@ -0,0 +1,271 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was copied from client-go/tools/cache/heap.go and modified +// for our non thread-safe heap + +package util + +import ( + "testing" +) + +func testHeapObjectKeyFunc(obj interface{}) (string, error) { + return obj.(testHeapObject).name, nil +} + +type testHeapObject struct { + name string + val interface{} +} + +func mkHeapObj(name string, val interface{}) testHeapObject { + return testHeapObject{name: name, val: val} +} + +func compareInts(val1 interface{}, val2 interface{}) bool { + first := val1.(testHeapObject).val.(int) + second := val2.(testHeapObject).val.(int) + return first < second +} + +// TestHeapBasic tests Heap invariant +func TestHeapBasic(t *testing.T) { + h := NewHeap(testHeapObjectKeyFunc, compareInts) + const amount = 500 + var i int + + for i = amount; i > 0; i-- { + h.Add(mkHeapObj(string([]rune{'a', rune(i)}), i)) + } + + // Make sure that the numbers are popped in ascending order. 
+ prevNum := 0 + for i := 0; i < amount; i++ { + obj, err := h.Pop() + num := obj.(testHeapObject).val.(int) + // All the items must be sorted. + if err != nil || prevNum > num { + t.Errorf("got %v out of order, last was %v", obj, prevNum) + } + prevNum = num + } +} + +// Tests Heap.Add and ensures that heap invariant is preserved after adding items. +func TestHeap_Add(t *testing.T) { + h := NewHeap(testHeapObjectKeyFunc, compareInts) + h.Add(mkHeapObj("foo", 10)) + h.Add(mkHeapObj("bar", 1)) + h.Add(mkHeapObj("baz", 11)) + h.Add(mkHeapObj("zab", 30)) + h.Add(mkHeapObj("foo", 13)) // This updates "foo". + + item, err := h.Pop() + if e, a := 1, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } + item, err = h.Pop() + if e, a := 11, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } + h.Delete(mkHeapObj("baz", 11)) // Nothing is deleted. + h.Add(mkHeapObj("foo", 14)) // foo is updated. + item, err = h.Pop() + if e, a := 14, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } + item, err = h.Pop() + if e, a := 30, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } +} + +// TestHeap_AddIfNotPresent tests Heap.AddIfNotPresent and ensures that heap +// invariant is preserved after adding items. +func TestHeap_AddIfNotPresent(t *testing.T) { + h := NewHeap(testHeapObjectKeyFunc, compareInts) + h.AddIfNotPresent(mkHeapObj("foo", 10)) + h.AddIfNotPresent(mkHeapObj("bar", 1)) + h.AddIfNotPresent(mkHeapObj("baz", 11)) + h.AddIfNotPresent(mkHeapObj("zab", 30)) + h.AddIfNotPresent(mkHeapObj("foo", 13)) // This is not added. 
+ + if len := len(h.data.items); len != 4 { + t.Errorf("unexpected number of items: %d", len) + } + if val := h.data.items["foo"].obj.(testHeapObject).val; val != 10 { + t.Errorf("unexpected value: %d", val) + } + item, err := h.Pop() + if e, a := 1, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } + item, err = h.Pop() + if e, a := 10, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } + // bar is already popped. Let's add another one. + h.AddIfNotPresent(mkHeapObj("bar", 14)) + item, err = h.Pop() + if e, a := 11, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } + item, err = h.Pop() + if e, a := 14, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } +} + +// TestHeap_Delete tests Heap.Delete and ensures that heap invariant is +// preserved after deleting items. +func TestHeap_Delete(t *testing.T) { + h := NewHeap(testHeapObjectKeyFunc, compareInts) + h.Add(mkHeapObj("foo", 10)) + h.Add(mkHeapObj("bar", 1)) + h.Add(mkHeapObj("bal", 31)) + h.Add(mkHeapObj("baz", 11)) + + // Delete head. Delete should work with "key" and doesn't care about the value. + if err := h.Delete(mkHeapObj("bar", 200)); err != nil { + t.Fatalf("Failed to delete head.") + } + item, err := h.Pop() + if e, a := 10, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } + h.Add(mkHeapObj("zab", 30)) + h.Add(mkHeapObj("faz", 30)) + len := h.data.Len() + // Delete non-existing item. + if err = h.Delete(mkHeapObj("non-existent", 10)); err == nil || len != h.data.Len() { + t.Fatalf("Didn't expect any item removal") + } + // Delete tail. + if err = h.Delete(mkHeapObj("bal", 31)); err != nil { + t.Fatalf("Failed to delete tail.") + } + // Delete one of the items with value 30. 
+ if err = h.Delete(mkHeapObj("zab", 30)); err != nil { + t.Fatalf("Failed to delete item.") + } + item, err = h.Pop() + if e, a := 11, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } + item, err = h.Pop() + if e, a := 30, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } + if h.data.Len() != 0 { + t.Fatalf("expected an empty heap.") + } +} + +// TestHeap_Update tests Heap.Update and ensures that heap invariant is +// preserved after adding items. +func TestHeap_Update(t *testing.T) { + h := NewHeap(testHeapObjectKeyFunc, compareInts) + h.Add(mkHeapObj("foo", 10)) + h.Add(mkHeapObj("bar", 1)) + h.Add(mkHeapObj("bal", 31)) + h.Add(mkHeapObj("baz", 11)) + + // Update an item to a value that should push it to the head. + h.Update(mkHeapObj("baz", 0)) + if h.data.queue[0] != "baz" || h.data.items["baz"].index != 0 { + t.Fatalf("expected baz to be at the head") + } + item, err := h.Pop() + if e, a := 0, item.(testHeapObject).val; err != nil || a != e { + t.Fatalf("expected %d, got %d", e, a) + } + // Update bar to push it farther back in the queue. + h.Update(mkHeapObj("bar", 100)) + if h.data.queue[0] != "foo" || h.data.items["foo"].index != 0 { + t.Fatalf("expected foo to be at the head") + } +} + +// TestHeap_Get tests Heap.Get. +func TestHeap_Get(t *testing.T) { + h := NewHeap(testHeapObjectKeyFunc, compareInts) + h.Add(mkHeapObj("foo", 10)) + h.Add(mkHeapObj("bar", 1)) + h.Add(mkHeapObj("bal", 31)) + h.Add(mkHeapObj("baz", 11)) + + // Get works with the key. + obj, exists, err := h.Get(mkHeapObj("baz", 0)) + if err != nil || exists == false || obj.(testHeapObject).val != 11 { + t.Fatalf("unexpected error in getting element") + } + // Get non-existing object. 
+ _, exists, err = h.Get(mkHeapObj("non-existing", 0)) + if err != nil || exists == true { + t.Fatalf("didn't expect to get any object") + } +} + +// TestHeap_GetByKey tests Heap.GetByKey and is very similar to TestHeap_Get. +func TestHeap_GetByKey(t *testing.T) { + h := NewHeap(testHeapObjectKeyFunc, compareInts) + h.Add(mkHeapObj("foo", 10)) + h.Add(mkHeapObj("bar", 1)) + h.Add(mkHeapObj("bal", 31)) + h.Add(mkHeapObj("baz", 11)) + + obj, exists, err := h.GetByKey("baz") + if err != nil || exists == false || obj.(testHeapObject).val != 11 { + t.Fatalf("unexpected error in getting element") + } + // Get non-existing object. + _, exists, err = h.GetByKey("non-existing") + if err != nil || exists == true { + t.Fatalf("didn't expect to get any object") + } +} + +// TestHeap_List tests Heap.List function. +func TestHeap_List(t *testing.T) { + h := NewHeap(testHeapObjectKeyFunc, compareInts) + list := h.List() + if len(list) != 0 { + t.Errorf("expected an empty list") + } + + items := map[string]int{ + "foo": 10, + "bar": 1, + "bal": 30, + "baz": 11, + "faz": 30, + } + for k, v := range items { + h.Add(mkHeapObj(k, v)) + } + list = h.List() + if len(list) != len(items) { + t.Errorf("expected %d items, got %d", len(items), len(list)) + } + for _, obj := range list { + heapObj := obj.(testHeapObject) + v, ok := items[heapObj.name] + if !ok || v != heapObj.val { + t.Errorf("unexpected item in the list: %v", heapObj) + } + } +} diff --git a/pkg/ssh/ssh.go b/pkg/ssh/ssh.go index bee21f6ee5a..5b6cb4e1509 100644 --- a/pkg/ssh/ssh.go +++ b/pkg/ssh/ssh.go @@ -26,7 +26,6 @@ import ( "encoding/pem" "errors" "fmt" - "io" "io/ioutil" mathrand "math/rand" "net" @@ -39,11 +38,11 @@ import ( "github.com/prometheus/client_golang/prometheus" "golang.org/x/crypto/ssh" - "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" ) var ( @@ -68,51 +67,27 @@ func init() { // TODO: Unit tests for this 
code, we can spin up a test SSH server with instructions here: // https://godoc.org/golang.org/x/crypto/ssh#ServerConn -type SSHTunnel struct { +type sshTunnel struct { Config *ssh.ClientConfig Host string SSHPort string - running bool - sock net.Listener client *ssh.Client } -func (s *SSHTunnel) copyBytes(out io.Writer, in io.Reader) { - if _, err := io.Copy(out, in); err != nil { - klog.Errorf("Error in SSH tunnel: %v", err) - } -} - -func NewSSHTunnel(user, keyfile, host string) (*SSHTunnel, error) { - signer, err := MakePrivateKeySignerFromFile(keyfile) - if err != nil { - return nil, err - } - return makeSSHTunnel(user, signer, host) -} - -func NewSSHTunnelFromBytes(user string, privateKey []byte, host string) (*SSHTunnel, error) { - signer, err := MakePrivateKeySignerFromBytes(privateKey) - if err != nil { - return nil, err - } - return makeSSHTunnel(user, signer, host) -} - -func makeSSHTunnel(user string, signer ssh.Signer, host string) (*SSHTunnel, error) { +func makeSSHTunnel(user string, signer ssh.Signer, host string) (*sshTunnel, error) { config := ssh.ClientConfig{ User: user, Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } - return &SSHTunnel{ + return &sshTunnel{ Config: &config, Host: host, SSHPort: "22", }, nil } -func (s *SSHTunnel) Open() error { +func (s *sshTunnel) Open() error { var err error s.client, err = realTimeoutDialer.Dial("tcp", net.JoinHostPort(s.Host, s.SSHPort), s.Config) tunnelOpenCounter.Inc() @@ -122,7 +97,7 @@ func (s *SSHTunnel) Open() error { return err } -func (s *SSHTunnel) Dial(ctx context.Context, network, address string) (net.Conn, error) { +func (s *sshTunnel) Dial(ctx context.Context, network, address string) (net.Conn, error) { if s.client == nil { return nil, errors.New("tunnel is not opened.") } @@ -130,20 +105,7 @@ func (s *SSHTunnel) Dial(ctx context.Context, network, address string) (net.Conn return s.client.Dial(network, address) } -func (s *SSHTunnel) tunnel(conn 
net.Conn, remoteHost, remotePort string) error { - if s.client == nil { - return errors.New("tunnel is not opened.") - } - tunnel, err := s.client.Dial("tcp", net.JoinHostPort(remoteHost, remotePort)) - if err != nil { - return err - } - go s.copyBytes(tunnel, conn) - go s.copyBytes(conn, tunnel) - return nil -} - -func (s *SSHTunnel) Close() error { +func (s *sshTunnel) Close() error { if s.client == nil { return errors.New("Cannot close tunnel. Tunnel was not opened.") } @@ -305,13 +267,17 @@ type sshTunnelEntry struct { } type sshTunnelCreator interface { - NewSSHTunnel(user, keyFile, healthCheckURL string) (tunnel, error) + newSSHTunnel(user, keyFile, host string) (tunnel, error) } type realTunnelCreator struct{} -func (*realTunnelCreator) NewSSHTunnel(user, keyFile, healthCheckURL string) (tunnel, error) { - return NewSSHTunnel(user, keyFile, healthCheckURL) +func (*realTunnelCreator) newSSHTunnel(user, keyFile, host string) (tunnel, error) { + signer, err := MakePrivateKeySignerFromFile(keyFile) + if err != nil { + return nil, err + } + return makeSSHTunnel(user, signer, host) } type SSHTunnelList struct { @@ -481,7 +447,7 @@ func (l *SSHTunnelList) Update(addrs []string) { func (l *SSHTunnelList) createAndAddTunnel(addr string) { klog.Infof("Trying to add tunnel to %q", addr) - tunnel, err := l.tunnelCreator.NewSSHTunnel(l.user, l.keyfile, addr) + tunnel, err := l.tunnelCreator.newSSHTunnel(l.user, l.keyfile, addr) if err != nil { klog.Errorf("Failed to create tunnel for %q: %v", addr, err) return diff --git a/pkg/ssh/ssh_test.go b/pkg/ssh/ssh_test.go index 6148233f7ec..d0278016f45 100644 --- a/pkg/ssh/ssh_test.go +++ b/pkg/ssh/ssh_test.go @@ -27,9 +27,9 @@ import ( "testing" "time" - "k8s.io/apimachinery/pkg/util/wait" - "golang.org/x/crypto/ssh" + + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" ) @@ -134,7 +134,7 @@ func TestSSHTunnel(t *testing.T) { } privateData := EncodePrivateKey(private) - tunnel, err := NewSSHTunnelFromBytes("foo", privateData, 
server.Host) + tunnel, err := newSSHTunnelFromBytes("foo", privateData, server.Host) if err != nil { t.Errorf("unexpected error: %v", err) t.FailNow() @@ -183,7 +183,7 @@ func (*fakeTunnel) Dial(ctx context.Context, network, address string) (net.Conn, type fakeTunnelCreator struct{} -func (*fakeTunnelCreator) NewSSHTunnel(string, string, string) (tunnel, error) { +func (*fakeTunnelCreator) newSSHTunnel(string, string, string) (tunnel, error) { return &fakeTunnel{}, nil } @@ -355,3 +355,11 @@ func TestTimeoutDialer(t *testing.T) { listener.Close() } + +func newSSHTunnelFromBytes(user string, privateKey []byte, host string) (*sshTunnel, error) { + signer, err := MakePrivateKeySignerFromBytes(privateKey) + if err != nil { + return nil, err + } + return makeSSHTunnel(user, signer, host) +} diff --git a/pkg/util/conntrack/conntrack.go b/pkg/util/conntrack/conntrack.go index 353bc0d0c25..5569b411dc1 100644 --- a/pkg/util/conntrack/conntrack.go +++ b/pkg/util/conntrack/conntrack.go @@ -107,3 +107,19 @@ func ClearEntriesForNAT(execer exec.Interface, origin, dest string, protocol v1. } return nil } + +// ClearEntriesForPortNAT uses the conntrack tool to delete the contrack entries +// for connections specified by the {dest IP, port} pair. +// Known issue: +// https://github.com/kubernetes/kubernetes/issues/59368 +func ClearEntriesForPortNAT(execer exec.Interface, dest string, port int, protocol v1.Protocol) error { + if port <= 0 { + return fmt.Errorf("Wrong port number. The port number must be greater then zero") + } + parameters := parametersWithFamily(utilnet.IsIPv6String(dest), "-D", "-p", protoStr(protocol), "--dport", strconv.Itoa(port), "--dst-nat", dest) + err := Exec(execer, parameters...) 
+ if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { + return fmt.Errorf("error deleting conntrack entries for UDP port: %d, error: %v", port, err) + } + return nil +} diff --git a/pkg/util/conntrack/conntrack_test.go b/pkg/util/conntrack/conntrack_test.go index 6e1c18735fb..af05a65e12a 100644 --- a/pkg/util/conntrack/conntrack_test.go +++ b/pkg/util/conntrack/conntrack_test.go @@ -234,3 +234,50 @@ func TestDeleteUDPConnections(t *testing.T) { t.Errorf("Expect command executed %d times, but got %d", svcCount, fexec.CommandCalls) } } + +func TestClearUDPConntrackForPortNAT(t *testing.T) { + fcmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil }, + func() ([]byte, error) { + return []byte(""), fmt.Errorf("conntrack v1.4.2 (conntrack-tools): 0 flow entries have been deleted") + }, + func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil }, + }, + } + fexec := fakeexec.FakeExec{ + CommandScript: []fakeexec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) 
}, + }, + LookPathFunc: func(cmd string) (string, error) { return cmd, nil }, + } + testCases := []struct { + name string + port int + dest string + }{ + { + name: "IPv4 success", + port: 30211, + dest: "1.2.3.4", + }, + } + svcCount := 0 + for i, tc := range testCases { + err := ClearEntriesForPortNAT(&fexec, tc.dest, tc.port, v1.ProtocolUDP) + if err != nil { + t.Errorf("%s test case: unexpected error: %v", tc.name, err) + } + expectCommand := fmt.Sprintf("conntrack -D -p udp --dport %d --dst-nat %s", tc.port, tc.dest) + familyParamStr(utilnet.IsIPv6String(tc.dest)) + execCommand := strings.Join(fcmd.CombinedOutputLog[i], " ") + if expectCommand != execCommand { + t.Errorf("%s test case: Expect command: %s, but executed %s", tc.name, expectCommand, execCommand) + } + svcCount++ + } + if svcCount != fexec.CommandCalls { + t.Errorf("Expect command executed %d times, but got %d", svcCount, fexec.CommandCalls) + } +} diff --git a/pkg/util/mount/nsenter_mount_test.go b/pkg/util/mount/nsenter_mount_test.go index bb54a613fbc..164eab2f1cd 100644 --- a/pkg/util/mount/nsenter_mount_test.go +++ b/pkg/util/mount/nsenter_mount_test.go @@ -169,11 +169,19 @@ func newFakeNsenterMounter(tmpdir string, t *testing.T) (mounter *NsenterMounter } func TestNsenterExistsFile(t *testing.T) { - user, err := user.Current() - if err != nil { - t.Error(err) + var isRoot bool + usr, err := user.Current() + if err == nil { + isRoot = usr.Username == "root" + } else { + switch err.(type) { + case user.UnknownUserIdError: + // Root should be always known, this is some random UID + isRoot = false + default: + t.Fatal(err) + } } - isRoot := user.Username == "root" tests := []struct { name string diff --git a/pkg/util/node/node.go b/pkg/util/node/node.go index ff503855150..ce9ce636a9d 100644 --- a/pkg/util/node/node.go +++ b/pkg/util/node/node.go @@ -64,6 +64,17 @@ func GetHostname(hostnameOverride string) (string, error) { return strings.ToLower(hostName), nil } +// NoMatchError is a typed 
implementation of the error interface. It indicates a failure to get a matching Node. +type NoMatchError struct { + addresses []v1.NodeAddress +} + +// Error is the implementation of the conventional interface for +// representing an error condition, with the nil value representing no error. +func (e *NoMatchError) Error() string { + return fmt.Sprintf("no preferred addresses found; known addresses: %v", e.addresses) +} + // GetPreferredNodeAddress returns the address of the provided node, using the provided preference order. // If none of the preferred address types are found, an error is returned. func GetPreferredNodeAddress(node *v1.Node, preferredAddressTypes []v1.NodeAddressType) (string, error) { @@ -74,7 +85,7 @@ func GetPreferredNodeAddress(node *v1.Node, preferredAddressTypes []v1.NodeAddre } } } - return "", fmt.Errorf("no preferred addresses found; known addresses: %v", node.Status.Addresses) + return "", &NoMatchError{addresses: node.Status.Addresses} } // GetNodeHostIP returns the provided node's IP, based on the priority: diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index 961d5a35714..4a082addf1b 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -187,7 +187,7 @@ func getMaxDataDiskCount(instanceType string, sizeList *[]compute.VirtualMachine continue } if strings.ToUpper(*size.Name) == vmsize { - klog.V(2).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %d", *size.Name, *size.MaxDataDiskCount) + klog.V(12).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %d", *size.Name, *size.MaxDataDiskCount) return int64(*size.MaxDataDiskCount) } } diff --git a/pkg/volume/configmap/configmap.go b/pkg/volume/configmap/configmap.go index 823ce53d4b3..bd35aa50552 100644 --- a/pkg/volume/configmap/configmap.go +++ b/pkg/volume/configmap/configmap.go @@ -47,6 +47,10 @@ type configMapPlugin struct { var _ volume.VolumePlugin = 
&configMapPlugin{} +func getPath(uid types.UID, volName string, host volume.VolumeHost) string { + return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(configMapPluginName), volName) +} + func (plugin *configMapPlugin) Init(host volume.VolumeHost) error { plugin.host = host plugin.getConfigMap = host.GetConfigMapFunc() @@ -92,7 +96,7 @@ func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts v pod.UID, plugin, plugin.host.GetMounter(plugin.GetPluginName()), - volume.MetricsNil{}, + volume.NewCachedMetrics(volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host))), }, source: *spec.Volume.ConfigMap, pod: *pod, @@ -108,7 +112,7 @@ func (plugin *configMapPlugin) NewUnmounter(volName string, podUID types.UID) (v podUID, plugin, plugin.host.GetMounter(plugin.GetPluginName()), - volume.MetricsNil{}, + volume.NewCachedMetrics(volume.NewMetricsDu(getPath(podUID, volName, plugin.host))), }, }, nil } @@ -128,7 +132,7 @@ type configMapVolume struct { podUID types.UID plugin *configMapPlugin mounter mount.Interface - volume.MetricsNil + volume.MetricsProvider } var _ volume.Volume = &configMapVolume{} diff --git a/pkg/volume/csi/BUILD b/pkg/volume/csi/BUILD index 2bc3929fe0f..a4ac6145d40 100644 --- a/pkg/volume/csi/BUILD +++ b/pkg/volume/csi/BUILD @@ -16,6 +16,7 @@ go_library( "//pkg/features:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/csi/csiv0:go_default_library", "//pkg/volume/csi/nodeinfomanager:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -23,6 +24,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", @@ -44,7 +46,6 @@ go_test( "csi_client_test.go", "csi_mounter_test.go", "csi_plugin_test.go", - "main_test.go", ], embed = [":go_default_library"], deps = [ @@ -86,6 +87,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/volume/csi/csiv0:all-srcs", "//pkg/volume/csi/fake:all-srcs", "//pkg/volume/csi/nodeinfomanager:all-srcs", ], diff --git a/pkg/volume/csi/csi_attacher.go b/pkg/volume/csi/csi_attacher.go index c66792af0f6..d13a7310120 100644 --- a/pkg/volume/csi/csi_attacher.go +++ b/pkg/volume/csi/csi_attacher.go @@ -29,7 +29,6 @@ import ( "k8s.io/klog" - csipb "github.com/container-storage-interface/spec/lib/go/csi" "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -342,14 +341,18 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo }() if c.csiClient == nil { - c.csiClient = newCsiDriverClient(csiSource.Driver) + c.csiClient, err = newCsiDriverClient(csiDriverName(csiSource.Driver)) + if err != nil { + klog.Errorf(log("attacher.MountDevice failed to create newCsiDriverClient: %v", err)) + return err + } } csi := c.csiClient ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) defer cancel() // Check whether "STAGE_UNSTAGE_VOLUME" is set - stageUnstageSet, err := hasStageUnstageCapability(ctx, csi) + stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx) if err != nil { return err } @@ -361,7 +364,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo // Start MountDevice nodeName := string(c.plugin.host.GetNodeName()) - publishVolumeInfo, err := c.plugin.getPublishVolumeInfo(c.k8s, csiSource.VolumeHandle, csiSource.Driver, nodeName) + publishContext, err := c.plugin.getPublishContext(c.k8s, 
csiSource.VolumeHandle, csiSource.Driver, nodeName) nodeStageSecrets := map[string]string{} if csiSource.NodeStageSecretRef != nil { @@ -382,7 +385,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo fsType := csiSource.FSType err = csi.NodeStageVolume(ctx, csiSource.VolumeHandle, - publishVolumeInfo, + publishContext, deviceMountPath, fsType, accessMode, @@ -522,14 +525,18 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { } if c.csiClient == nil { - c.csiClient = newCsiDriverClient(driverName) + c.csiClient, err = newCsiDriverClient(csiDriverName(driverName)) + if err != nil { + klog.Errorf(log("attacher.UnmountDevice failed to create newCsiDriverClient: %v", err)) + return err + } } csi := c.csiClient ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) defer cancel() // Check whether "STAGE_UNSTAGE_VOLUME" is set - stageUnstageSet, err := hasStageUnstageCapability(ctx, csi) + stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx) if err != nil { klog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err)) return err @@ -563,24 +570,6 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { return nil } -func hasStageUnstageCapability(ctx context.Context, csi csiClient) (bool, error) { - capabilities, err := csi.NodeGetCapabilities(ctx) - if err != nil { - return false, err - } - - stageUnstageSet := false - if capabilities == nil { - return false, nil - } - for _, capability := range capabilities { - if capability.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME { - stageUnstageSet = true - } - } - return stageUnstageSet, nil -} - // getAttachmentName returns csi- func getAttachmentName(volName, csiDriverName, nodeName string) string { result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName))) diff --git a/pkg/volume/csi/csi_block.go b/pkg/volume/csi/csi_block.go index 
6a536cda1d2..3a80352c580 100644 --- a/pkg/volume/csi/csi_block.go +++ b/pkg/volume/csi/csi_block.go @@ -40,7 +40,7 @@ type csiBlockMapper struct { k8s kubernetes.Interface csiClient csiClient plugin *csiPlugin - driverName string + driverName csiDriverName specName string volumeID string readOnly bool @@ -96,7 +96,7 @@ func (m *csiBlockMapper) stageVolumeForBlock( klog.V(4).Infof(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath)) // Check whether "STAGE_UNSTAGE_VOLUME" is set - stageUnstageSet, err := hasStageUnstageCapability(ctx, csi) + stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx) if err != nil { klog.Error(log("blockMapper.stageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err)) return "", err @@ -287,7 +287,7 @@ func (m *csiBlockMapper) unpublishVolumeForBlock(ctx context.Context, csi csiCli // unstageVolumeForBlock unstages a block volume from stagingPath func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClient, stagingPath string) error { // Check whether "STAGE_UNSTAGE_VOLUME" is set - stageUnstageSet, err := hasStageUnstageCapability(ctx, csi) + stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx) if err != nil { klog.Error(log("blockMapper.unstageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err)) return err diff --git a/pkg/volume/csi/csi_block_test.go b/pkg/volume/csi/csi_block_test.go index b19c4867555..7bcd00e219e 100644 --- a/pkg/volume/csi/csi_block_test.go +++ b/pkg/volume/csi/csi_block_test.go @@ -33,7 +33,8 @@ import ( volumetest "k8s.io/kubernetes/pkg/volume/testing" ) -func prepareBlockMapperTest(plug *csiPlugin, specVolumeName string) (*csiBlockMapper, *volume.Spec, *api.PersistentVolume, error) { +func prepareBlockMapperTest(plug *csiPlugin, specVolumeName string, t *testing.T) (*csiBlockMapper, *volume.Spec, *api.PersistentVolume, error) { + registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) pv := 
makeTestPV(specVolumeName, 10, testDriver, testVol) spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly) mapper, err := plug.NewBlockVolumeMapper( @@ -73,7 +74,7 @@ func TestBlockMapperGetGlobalMapPath(t *testing.T) { } for _, tc := range testCases { t.Logf("test case: %s", tc.name) - csiMapper, spec, _, err := prepareBlockMapperTest(plug, tc.specVolumeName) + csiMapper, spec, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t) if err != nil { t.Fatalf("Failed to make a new Mapper: %v", err) } @@ -113,7 +114,7 @@ func TestBlockMapperGetStagingPath(t *testing.T) { } for _, tc := range testCases { t.Logf("test case: %s", tc.name) - csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName) + csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t) if err != nil { t.Fatalf("Failed to make a new Mapper: %v", err) } @@ -150,7 +151,7 @@ func TestBlockMapperGetPublishPath(t *testing.T) { } for _, tc := range testCases { t.Logf("test case: %s", tc.name) - csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName) + csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t) if err != nil { t.Fatalf("Failed to make a new Mapper: %v", err) } @@ -187,7 +188,7 @@ func TestBlockMapperGetDeviceMapPath(t *testing.T) { } for _, tc := range testCases { t.Logf("test case: %s", tc.name) - csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName) + csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t) if err != nil { t.Fatalf("Failed to make a new Mapper: %v", err) } @@ -219,7 +220,7 @@ func TestBlockMapperSetupDevice(t *testing.T) { ) plug.host = host - csiMapper, _, pv, err := prepareBlockMapperTest(plug, "test-pv") + csiMapper, _, pv, err := prepareBlockMapperTest(plug, "test-pv", t) if err != nil { t.Fatalf("Failed to make a new Mapper: %v", err) } @@ -229,7 +230,7 @@ func TestBlockMapperSetupDevice(t *testing.T) { csiMapper.csiClient = 
setupClient(t, true) - attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName)) + attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName)) attachment := makeTestAttachment(attachID, nodeName, pvName) attachment.Status.Attached = true _, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment) @@ -286,7 +287,7 @@ func TestBlockMapperMapDevice(t *testing.T) { ) plug.host = host - csiMapper, _, pv, err := prepareBlockMapperTest(plug, "test-pv") + csiMapper, _, pv, err := prepareBlockMapperTest(plug, "test-pv", t) if err != nil { t.Fatalf("Failed to make a new Mapper: %v", err) } @@ -296,7 +297,7 @@ func TestBlockMapperMapDevice(t *testing.T) { csiMapper.csiClient = setupClient(t, true) - attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName)) + attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName)) attachment := makeTestAttachment(attachID, nodeName, pvName) attachment.Status.Attached = true _, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment) @@ -369,7 +370,7 @@ func TestBlockMapperTearDownDevice(t *testing.T) { ) plug.host = host - _, spec, pv, err := prepareBlockMapperTest(plug, "test-pv") + _, spec, pv, err := prepareBlockMapperTest(plug, "test-pv", t) if err != nil { t.Fatalf("Failed to make a new Mapper: %v", err) } diff --git a/pkg/volume/csi/csi_client.go b/pkg/volume/csi/csi_client.go index ea6ba190d8e..bbe30134629 100644 --- a/pkg/volume/csi/csi_client.go +++ b/pkg/volume/csi/csi_client.go @@ -24,19 +24,21 @@ import ( "net" "time" - csipb "github.com/container-storage-interface/spec/lib/go/csi" + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc" api "k8s.io/api/core/v1" + utilversion "k8s.io/apimachinery/pkg/util/version" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog" "k8s.io/kubernetes/pkg/features" + csipbv0 
"k8s.io/kubernetes/pkg/volume/csi/csiv0" ) type csiClient interface { NodeGetInfo(ctx context.Context) ( nodeID string, maxVolumePerNode int64, - accessibleTopology *csipb.Topology, + accessibleTopology map[string]string, err error) NodePublishVolume( ctx context.Context, @@ -45,7 +47,7 @@ type csiClient interface { stagingTargetPath string, targetPath string, accessMode api.PersistentVolumeAccessMode, - volumeInfo map[string]string, + publishContext map[string]string, volumeContext map[string]string, secrets map[string]string, fsType string, @@ -66,66 +68,172 @@ type csiClient interface { volumeContext map[string]string, ) error NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error - NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) + NodeSupportsStageUnstage(ctx context.Context) (bool, error) } +// Strongly typed address +type csiAddr string + +// Strongly typed driver name +type csiDriverName string + // csiClient encapsulates all csi-plugin methods type csiDriverClient struct { - driverName string - nodeClientCreator nodeClientCreator + driverName csiDriverName + addr csiAddr + nodeV1ClientCreator nodeV1ClientCreator + nodeV0ClientCreator nodeV0ClientCreator } var _ csiClient = &csiDriverClient{} -type nodeClientCreator func(driverName string) ( - nodeClient csipb.NodeClient, +type nodeV1ClientCreator func(addr csiAddr) ( + nodeClient csipbv1.NodeClient, closer io.Closer, err error, ) -// newNodeClient creates a new NodeClient with the internally used gRPC +type nodeV0ClientCreator func(addr csiAddr) ( + nodeClient csipbv0.NodeClient, + closer io.Closer, + err error, +) + +// newV1NodeClient creates a new NodeClient with the internally used gRPC // connection set up. It also returns a closer which must to be called to close // the gRPC connection when the NodeClient is not used anymore. 
-// This is the default implementation for the nodeClientCreator, used in +// This is the default implementation for the nodeV1ClientCreator, used in // newCsiDriverClient. -func newNodeClient(driverName string) (nodeClient csipb.NodeClient, closer io.Closer, err error) { +func newV1NodeClient(addr csiAddr) (nodeClient csipbv1.NodeClient, closer io.Closer, err error) { var conn *grpc.ClientConn - conn, err = newGrpcConn(driverName) + conn, err = newGrpcConn(addr) if err != nil { return nil, nil, err } - nodeClient = csipb.NewNodeClient(conn) + nodeClient = csipbv1.NewNodeClient(conn) return nodeClient, conn, nil } -func newCsiDriverClient(driverName string) *csiDriverClient { - c := &csiDriverClient{ - driverName: driverName, - nodeClientCreator: newNodeClient, +// newV0NodeClient creates a new NodeClient with the internally used gRPC +// connection set up. It also returns a closer which must to be called to close +// the gRPC connection when the NodeClient is not used anymore. +// This is the default implementation for the nodeV1ClientCreator, used in +// newCsiDriverClient. 
+func newV0NodeClient(addr csiAddr) (nodeClient csipbv0.NodeClient, closer io.Closer, err error) { + var conn *grpc.ClientConn + conn, err = newGrpcConn(addr) + if err != nil { + return nil, nil, err } - return c + + nodeClient = csipbv0.NewNodeClient(conn) + return nodeClient, conn, nil +} + +func newCsiDriverClient(driverName csiDriverName) (*csiDriverClient, error) { + if driverName == "" { + return nil, fmt.Errorf("driver name is empty") + } + + addr := fmt.Sprintf(csiAddrTemplate, driverName) + requiresV0Client := true + if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) { + var existingDriver csiDriver + driverExists := false + func() { + csiDrivers.RLock() + defer csiDrivers.RUnlock() + existingDriver, driverExists = csiDrivers.driversMap[string(driverName)] + }() + + if !driverExists { + return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName) + } + + addr = existingDriver.driverEndpoint + requiresV0Client = versionRequiresV0Client(existingDriver.highestSupportedVersion) + } + + nodeV1ClientCreator := newV1NodeClient + nodeV0ClientCreator := newV0NodeClient + if requiresV0Client { + nodeV1ClientCreator = nil + } else { + nodeV0ClientCreator = nil + } + + return &csiDriverClient{ + driverName: driverName, + addr: csiAddr(addr), + nodeV1ClientCreator: nodeV1ClientCreator, + nodeV0ClientCreator: nodeV0ClientCreator, + }, nil } func (c *csiDriverClient) NodeGetInfo(ctx context.Context) ( nodeID string, maxVolumePerNode int64, - accessibleTopology *csipb.Topology, + accessibleTopology map[string]string, err error) { klog.V(4).Info(log("calling NodeGetInfo rpc")) + if c.nodeV1ClientCreator != nil { + return c.nodeGetInfoV1(ctx) + } else if c.nodeV0ClientCreator != nil { + return c.nodeGetInfoV0(ctx) + } - nodeClient, closer, err := c.nodeClientCreator(c.driverName) + err = fmt.Errorf("failed to call NodeGetInfo. 
Both nodeV1ClientCreator and nodeV0ClientCreator are nil") + + return nodeID, maxVolumePerNode, accessibleTopology, err +} + +func (c *csiDriverClient) nodeGetInfoV1(ctx context.Context) ( + nodeID string, + maxVolumePerNode int64, + accessibleTopology map[string]string, + err error) { + + nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return "", 0, nil, err } defer closer.Close() - res, err := nodeClient.NodeGetInfo(ctx, &csipb.NodeGetInfoRequest{}) + res, err := nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{}) if err != nil { return "", 0, nil, err } - return res.GetNodeId(), res.GetMaxVolumesPerNode(), res.GetAccessibleTopology(), nil + topology := res.GetAccessibleTopology() + if topology != nil { + accessibleTopology = topology.Segments + } + return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil +} + +func (c *csiDriverClient) nodeGetInfoV0(ctx context.Context) ( + nodeID string, + maxVolumePerNode int64, + accessibleTopology map[string]string, + err error) { + + nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) + if err != nil { + return "", 0, nil, err + } + defer closer.Close() + + res, err := nodeClient.NodeGetInfo(ctx, &csipbv0.NodeGetInfoRequest{}) + if err != nil { + return "", 0, nil, err + } + + topology := res.GetAccessibleTopology() + if topology != nil { + accessibleTopology = topology.Segments + } + return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil } func (c *csiDriverClient) NodePublishVolume( @@ -135,7 +243,7 @@ func (c *csiDriverClient) NodePublishVolume( stagingTargetPath string, targetPath string, accessMode api.PersistentVolumeAccessMode, - volumeInfo map[string]string, + publishContext map[string]string, volumeContext map[string]string, secrets map[string]string, fsType string, @@ -148,23 +256,69 @@ func (c *csiDriverClient) NodePublishVolume( if targetPath == "" { return errors.New("missing target path") } + if c.nodeV1ClientCreator != nil { + return 
c.nodePublishVolumeV1( + ctx, + volID, + readOnly, + stagingTargetPath, + targetPath, + accessMode, + publishContext, + volumeContext, + secrets, + fsType, + mountOptions, + ) + } else if c.nodeV0ClientCreator != nil { + return c.nodePublishVolumeV0( + ctx, + volID, + readOnly, + stagingTargetPath, + targetPath, + accessMode, + publishContext, + volumeContext, + secrets, + fsType, + mountOptions, + ) + } - nodeClient, closer, err := c.nodeClientCreator(c.driverName) + return fmt.Errorf("failed to call NodePublishVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil") + +} + +func (c *csiDriverClient) nodePublishVolumeV1( + ctx context.Context, + volID string, + readOnly bool, + stagingTargetPath string, + targetPath string, + accessMode api.PersistentVolumeAccessMode, + publishContext map[string]string, + volumeContext map[string]string, + secrets map[string]string, + fsType string, + mountOptions []string, +) error { + nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return err } defer closer.Close() - req := &csipb.NodePublishVolumeRequest{ + req := &csipbv1.NodePublishVolumeRequest{ VolumeId: volID, TargetPath: targetPath, Readonly: readOnly, - PublishContext: volumeInfo, + PublishContext: publishContext, VolumeContext: volumeContext, Secrets: secrets, - VolumeCapability: &csipb.VolumeCapability{ - AccessMode: &csipb.VolumeCapability_AccessMode{ - Mode: asCSIAccessMode(accessMode), + VolumeCapability: &csipbv1.VolumeCapability{ + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: asCSIAccessModeV1(accessMode), }, }, } @@ -173,12 +327,65 @@ func (c *csiDriverClient) NodePublishVolume( } if fsType == fsTypeBlockName { - req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{ - Block: &csipb.VolumeCapability_BlockVolume{}, + req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{ + Block: &csipbv1.VolumeCapability_BlockVolume{}, } } else { - req.VolumeCapability.AccessType = 
&csipb.VolumeCapability_Mount{ - Mount: &csipb.VolumeCapability_MountVolume{ + req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{ + Mount: &csipbv1.VolumeCapability_MountVolume{ + FsType: fsType, + MountFlags: mountOptions, + }, + } + } + + _, err = nodeClient.NodePublishVolume(ctx, req) + return err +} + +func (c *csiDriverClient) nodePublishVolumeV0( + ctx context.Context, + volID string, + readOnly bool, + stagingTargetPath string, + targetPath string, + accessMode api.PersistentVolumeAccessMode, + publishContext map[string]string, + volumeContext map[string]string, + secrets map[string]string, + fsType string, + mountOptions []string, +) error { + nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) + if err != nil { + return err + } + defer closer.Close() + + req := &csipbv0.NodePublishVolumeRequest{ + VolumeId: volID, + TargetPath: targetPath, + Readonly: readOnly, + PublishInfo: publishContext, + VolumeAttributes: volumeContext, + NodePublishSecrets: secrets, + VolumeCapability: &csipbv0.VolumeCapability{ + AccessMode: &csipbv0.VolumeCapability_AccessMode{ + Mode: asCSIAccessModeV0(accessMode), + }, + }, + } + if stagingTargetPath != "" { + req.StagingTargetPath = stagingTargetPath + } + + if fsType == fsTypeBlockName { + req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Block{ + Block: &csipbv0.VolumeCapability_BlockVolume{}, + } + } else { + req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Mount{ + Mount: &csipbv0.VolumeCapability_MountVolume{ FsType: fsType, MountFlags: mountOptions, }, @@ -198,13 +405,39 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, return errors.New("missing target path") } - nodeClient, closer, err := c.nodeClientCreator(c.driverName) + if c.nodeV1ClientCreator != nil { + return c.nodeUnpublishVolumeV1(ctx, volID, targetPath) + } else if c.nodeV0ClientCreator != nil { + return c.nodeUnpublishVolumeV0(ctx, volID, targetPath) + } + + return fmt.Errorf("failed 
to call NodeUnpublishVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil") +} + +func (c *csiDriverClient) nodeUnpublishVolumeV1(ctx context.Context, volID string, targetPath string) error { + nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return err } defer closer.Close() - req := &csipb.NodeUnpublishVolumeRequest{ + req := &csipbv1.NodeUnpublishVolumeRequest{ + VolumeId: volID, + TargetPath: targetPath, + } + + _, err = nodeClient.NodeUnpublishVolume(ctx, req) + return err +} + +func (c *csiDriverClient) nodeUnpublishVolumeV0(ctx context.Context, volID string, targetPath string) error { + nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) + if err != nil { + return err + } + defer closer.Close() + + req := &csipbv0.NodeUnpublishVolumeRequest{ VolumeId: volID, TargetPath: targetPath, } @@ -230,19 +463,38 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context, return errors.New("missing staging target path") } - nodeClient, closer, err := c.nodeClientCreator(c.driverName) + if c.nodeV1ClientCreator != nil { + return c.nodeStageVolumeV1(ctx, volID, publishContext, stagingTargetPath, fsType, accessMode, secrets, volumeContext) + } else if c.nodeV0ClientCreator != nil { + return c.nodeStageVolumeV0(ctx, volID, publishContext, stagingTargetPath, fsType, accessMode, secrets, volumeContext) + } + + return fmt.Errorf("failed to call NodeStageVolume. 
Both nodeV1ClientCreator and nodeV0ClientCreator are nil") +} + +func (c *csiDriverClient) nodeStageVolumeV1( + ctx context.Context, + volID string, + publishContext map[string]string, + stagingTargetPath string, + fsType string, + accessMode api.PersistentVolumeAccessMode, + secrets map[string]string, + volumeContext map[string]string, +) error { + nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return err } defer closer.Close() - req := &csipb.NodeStageVolumeRequest{ + req := &csipbv1.NodeStageVolumeRequest{ VolumeId: volID, PublishContext: publishContext, StagingTargetPath: stagingTargetPath, - VolumeCapability: &csipb.VolumeCapability{ - AccessMode: &csipb.VolumeCapability_AccessMode{ - Mode: asCSIAccessMode(accessMode), + VolumeCapability: &csipbv1.VolumeCapability{ + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: asCSIAccessModeV1(accessMode), }, }, Secrets: secrets, @@ -250,12 +502,57 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context, } if fsType == fsTypeBlockName { - req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{ - Block: &csipb.VolumeCapability_BlockVolume{}, + req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{ + Block: &csipbv1.VolumeCapability_BlockVolume{}, } } else { - req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{ - Mount: &csipb.VolumeCapability_MountVolume{ + req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{ + Mount: &csipbv1.VolumeCapability_MountVolume{ + FsType: fsType, + }, + } + } + + _, err = nodeClient.NodeStageVolume(ctx, req) + return err +} + +func (c *csiDriverClient) nodeStageVolumeV0( + ctx context.Context, + volID string, + publishContext map[string]string, + stagingTargetPath string, + fsType string, + accessMode api.PersistentVolumeAccessMode, + secrets map[string]string, + volumeContext map[string]string, +) error { + nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) + if err != nil { + return err + } + 
defer closer.Close() + + req := &csipbv0.NodeStageVolumeRequest{ + VolumeId: volID, + PublishInfo: publishContext, + StagingTargetPath: stagingTargetPath, + VolumeCapability: &csipbv0.VolumeCapability{ + AccessMode: &csipbv0.VolumeCapability_AccessMode{ + Mode: asCSIAccessModeV0(accessMode), + }, + }, + NodeStageSecrets: secrets, + VolumeAttributes: volumeContext, + } + + if fsType == fsTypeBlockName { + req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Block{ + Block: &csipbv0.VolumeCapability_BlockVolume{}, + } + } else { + req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Mount{ + Mount: &csipbv0.VolumeCapability_MountVolume{ FsType: fsType, }, } @@ -274,13 +571,23 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT return errors.New("missing staging target path") } - nodeClient, closer, err := c.nodeClientCreator(c.driverName) + if c.nodeV1ClientCreator != nil { + return c.nodeUnstageVolumeV1(ctx, volID, stagingTargetPath) + } else if c.nodeV0ClientCreator != nil { + return c.nodeUnstageVolumeV0(ctx, volID, stagingTargetPath) + } + + return fmt.Errorf("failed to call NodeUnstageVolume. 
Both nodeV1ClientCreator and nodeV0ClientCreator are nil") +} + +func (c *csiDriverClient) nodeUnstageVolumeV1(ctx context.Context, volID, stagingTargetPath string) error { + nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) if err != nil { return err } defer closer.Close() - req := &csipb.NodeUnstageVolumeRequest{ + req := &csipbv1.NodeUnstageVolumeRequest{ VolumeId: volID, StagingTargetPath: stagingTargetPath, } @@ -288,59 +595,128 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT return err } -func (c *csiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) { - klog.V(4).Info(log("calling NodeGetCapabilities rpc")) - - nodeClient, closer, err := c.nodeClientCreator(c.driverName) +func (c *csiDriverClient) nodeUnstageVolumeV0(ctx context.Context, volID, stagingTargetPath string) error { + nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) if err != nil { - return nil, err + return err } defer closer.Close() - req := &csipb.NodeGetCapabilitiesRequest{} + req := &csipbv0.NodeUnstageVolumeRequest{ + VolumeId: volID, + StagingTargetPath: stagingTargetPath, + } + _, err = nodeClient.NodeUnstageVolume(ctx, req) + return err +} + +func (c *csiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) { + klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if NodeSupportsStageUnstage")) + + if c.nodeV1ClientCreator != nil { + return c.nodeSupportsStageUnstageV1(ctx) + } else if c.nodeV0ClientCreator != nil { + return c.nodeSupportsStageUnstageV0(ctx) + } + + return false, fmt.Errorf("failed to call NodeSupportsStageUnstage. 
Both nodeV1ClientCreator and nodeV0ClientCreator are nil") +} + +func (c *csiDriverClient) nodeSupportsStageUnstageV1(ctx context.Context) (bool, error) { + nodeClient, closer, err := c.nodeV1ClientCreator(c.addr) + if err != nil { + return false, err + } + defer closer.Close() + + req := &csipbv1.NodeGetCapabilitiesRequest{} resp, err := nodeClient.NodeGetCapabilities(ctx, req) if err != nil { - return nil, err + return false, err } - return resp.GetCapabilities(), nil + + capabilities := resp.GetCapabilities() + + stageUnstageSet := false + if capabilities == nil { + return false, nil + } + for _, capability := range capabilities { + if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME { + stageUnstageSet = true + } + } + return stageUnstageSet, nil } -func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_AccessMode_Mode { +func (c *csiDriverClient) nodeSupportsStageUnstageV0(ctx context.Context) (bool, error) { + nodeClient, closer, err := c.nodeV0ClientCreator(c.addr) + if err != nil { + return false, err + } + defer closer.Close() + + req := &csipbv0.NodeGetCapabilitiesRequest{} + resp, err := nodeClient.NodeGetCapabilities(ctx, req) + if err != nil { + return false, err + } + + capabilities := resp.GetCapabilities() + + stageUnstageSet := false + if capabilities == nil { + return false, nil + } + for _, capability := range capabilities { + if capability.GetRpc().GetType() == csipbv0.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME { + stageUnstageSet = true + } + } + return stageUnstageSet, nil +} + +func asCSIAccessModeV1(am api.PersistentVolumeAccessMode) csipbv1.VolumeCapability_AccessMode_Mode { switch am { case api.ReadWriteOnce: - return csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + return csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_WRITER case api.ReadOnlyMany: - return csipb.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY + return 
csipbv1.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY case api.ReadWriteMany: - return csipb.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER + return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER } - return csipb.VolumeCapability_AccessMode_UNKNOWN + return csipbv1.VolumeCapability_AccessMode_UNKNOWN } -func newGrpcConn(driverName string) (*grpc.ClientConn, error) { - if driverName == "" { - return nil, fmt.Errorf("driver name is empty") +func asCSIAccessModeV0(am api.PersistentVolumeAccessMode) csipbv0.VolumeCapability_AccessMode_Mode { + switch am { + case api.ReadWriteOnce: + return csipbv0.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + case api.ReadOnlyMany: + return csipbv0.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY + case api.ReadWriteMany: + return csipbv0.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER } - addr := fmt.Sprintf(csiAddrTemplate, driverName) - // TODO once KubeletPluginsWatcher graduates to beta, remove FeatureGate check - if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) { - csiDrivers.RLock() - driver, ok := csiDrivers.driversMap[driverName] - csiDrivers.RUnlock() + return csipbv0.VolumeCapability_AccessMode_UNKNOWN +} - if !ok { - return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName) - } - addr = driver.driverEndpoint - } +func newGrpcConn(addr csiAddr) (*grpc.ClientConn, error) { network := "unix" klog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr)) return grpc.Dial( - addr, + string(addr), grpc.WithInsecure(), grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) { return net.Dial(network, target) }), ) } + +func versionRequiresV0Client(version *utilversion.Version) bool { + if version != nil && version.Major() == 0 { + return true + } + + return false +} diff --git a/pkg/volume/csi/csi_client_test.go b/pkg/volume/csi/csi_client_test.go index 777be98b2de..78d70333277 100644 --- 
a/pkg/volume/csi/csi_client_test.go +++ b/pkg/volume/csi/csi_client_test.go @@ -23,7 +23,7 @@ import ( "reflect" "testing" - csipb "github.com/container-storage-interface/spec/lib/go/csi" + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" api "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/volume/csi/fake" ) @@ -43,10 +43,14 @@ func newFakeCsiDriverClient(t *testing.T, stagingCapable bool) *fakeCsiDriverCli func (c *fakeCsiDriverClient) NodeGetInfo(ctx context.Context) ( nodeID string, maxVolumePerNode int64, - accessibleTopology *csipb.Topology, + accessibleTopology map[string]string, err error) { - resp, err := c.nodeClient.NodeGetInfo(ctx, &csipb.NodeGetInfoRequest{}) - return resp.GetNodeId(), resp.GetMaxVolumesPerNode(), resp.GetAccessibleTopology(), err + resp, err := c.nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{}) + topology := resp.GetAccessibleTopology() + if topology != nil { + accessibleTopology = topology.Segments + } + return resp.GetNodeId(), resp.GetMaxVolumesPerNode(), accessibleTopology, err } func (c *fakeCsiDriverClient) NodePublishVolume( @@ -56,26 +60,26 @@ func (c *fakeCsiDriverClient) NodePublishVolume( stagingTargetPath string, targetPath string, accessMode api.PersistentVolumeAccessMode, - volumeInfo map[string]string, + publishContext map[string]string, volumeContext map[string]string, secrets map[string]string, fsType string, mountOptions []string, ) error { c.t.Log("calling fake.NodePublishVolume...") - req := &csipb.NodePublishVolumeRequest{ + req := &csipbv1.NodePublishVolumeRequest{ VolumeId: volID, TargetPath: targetPath, Readonly: readOnly, - PublishContext: volumeInfo, + PublishContext: publishContext, VolumeContext: volumeContext, Secrets: secrets, - VolumeCapability: &csipb.VolumeCapability{ - AccessMode: &csipb.VolumeCapability_AccessMode{ - Mode: asCSIAccessMode(accessMode), + VolumeCapability: &csipbv1.VolumeCapability{ + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: 
asCSIAccessModeV1(accessMode), }, - AccessType: &csipb.VolumeCapability_Mount{ - Mount: &csipb.VolumeCapability_MountVolume{ + AccessType: &csipbv1.VolumeCapability_Mount{ + Mount: &csipbv1.VolumeCapability_MountVolume{ FsType: fsType, MountFlags: mountOptions, }, @@ -89,7 +93,7 @@ func (c *fakeCsiDriverClient) NodePublishVolume( func (c *fakeCsiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error { c.t.Log("calling fake.NodeUnpublishVolume...") - req := &csipb.NodeUnpublishVolumeRequest{ + req := &csipbv1.NodeUnpublishVolumeRequest{ VolumeId: volID, TargetPath: targetPath, } @@ -108,16 +112,16 @@ func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context, volumeContext map[string]string, ) error { c.t.Log("calling fake.NodeStageVolume...") - req := &csipb.NodeStageVolumeRequest{ + req := &csipbv1.NodeStageVolumeRequest{ VolumeId: volID, PublishContext: publishContext, StagingTargetPath: stagingTargetPath, - VolumeCapability: &csipb.VolumeCapability{ - AccessMode: &csipb.VolumeCapability_AccessMode{ - Mode: asCSIAccessMode(accessMode), + VolumeCapability: &csipbv1.VolumeCapability{ + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: asCSIAccessModeV1(accessMode), }, - AccessType: &csipb.VolumeCapability_Mount{ - Mount: &csipb.VolumeCapability_MountVolume{ + AccessType: &csipbv1.VolumeCapability_Mount{ + Mount: &csipbv1.VolumeCapability_MountVolume{ FsType: fsType, }, }, @@ -132,7 +136,7 @@ func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context, func (c *fakeCsiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error { c.t.Log("calling fake.NodeUnstageVolume...") - req := &csipb.NodeUnstageVolumeRequest{ + req := &csipbv1.NodeUnstageVolumeRequest{ VolumeId: volID, StagingTargetPath: stagingTargetPath, } @@ -140,14 +144,26 @@ func (c *fakeCsiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stag return err } -func (c *fakeCsiDriverClient) 
NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) { - c.t.Log("calling fake.NodeGetCapabilities...") - req := &csipb.NodeGetCapabilitiesRequest{} +func (c *fakeCsiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) { + c.t.Log("calling fake.NodeGetCapabilities for NodeSupportsStageUnstage...") + req := &csipbv1.NodeGetCapabilitiesRequest{} resp, err := c.nodeClient.NodeGetCapabilities(ctx, req) if err != nil { - return nil, err + return false, err } - return resp.GetCapabilities(), nil + + capabilities := resp.GetCapabilities() + + stageUnstageSet := false + if capabilities == nil { + return false, nil + } + for _, capability := range capabilities { + if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME { + stageUnstageSet = true + } + } + return stageUnstageSet, nil } func setupClient(t *testing.T, stageUnstageSet bool) csiClient { @@ -173,17 +189,15 @@ func TestClientNodeGetInfo(t *testing.T) { name string expectedNodeID string expectedMaxVolumePerNode int64 - expectedAccessibleTopology *csipb.Topology + expectedAccessibleTopology map[string]string mustFail bool err error }{ { - name: "test ok", - expectedNodeID: "node1", - expectedMaxVolumePerNode: 16, - expectedAccessibleTopology: &csipb.Topology{ - Segments: map[string]string{"com.example.csi-topology/zone": "zone1"}, - }, + name: "test ok", + expectedNodeID: "node1", + expectedMaxVolumePerNode: 16, + expectedAccessibleTopology: map[string]string{"com.example.csi-topology/zone": "zone1"}, }, { name: "grpc error", @@ -198,13 +212,15 @@ func TestClientNodeGetInfo(t *testing.T) { fakeCloser := fake.NewCloser(t) client := &csiDriverClient{ driverName: "Fake Driver Name", - nodeClientCreator: func(driverName string) (csipb.NodeClient, io.Closer, error) { + nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) { nodeClient := fake.NewNodeClient(false /* stagingCapable */) 
nodeClient.SetNextError(tc.err) - nodeClient.SetNodeGetInfoResp(&csipb.NodeGetInfoResponse{ - NodeId: tc.expectedNodeID, - MaxVolumesPerNode: tc.expectedMaxVolumePerNode, - AccessibleTopology: tc.expectedAccessibleTopology, + nodeClient.SetNodeGetInfoResp(&csipbv1.NodeGetInfoResponse{ + NodeId: tc.expectedNodeID, + MaxVolumesPerNode: tc.expectedMaxVolumePerNode, + AccessibleTopology: &csipbv1.Topology{ + Segments: tc.expectedAccessibleTopology, + }, }) return nodeClient, fakeCloser, nil }, @@ -222,7 +238,7 @@ func TestClientNodeGetInfo(t *testing.T) { } if !reflect.DeepEqual(accessibleTopology, tc.expectedAccessibleTopology) { - t.Errorf("expected accessibleTopology: %v; got: %v", *tc.expectedAccessibleTopology, *accessibleTopology) + t.Errorf("expected accessibleTopology: %v; got: %v", tc.expectedAccessibleTopology, accessibleTopology) } if !tc.mustFail { @@ -252,7 +268,7 @@ func TestClientNodePublishVolume(t *testing.T) { fakeCloser := fake.NewCloser(t) client := &csiDriverClient{ driverName: "Fake Driver Name", - nodeClientCreator: func(driverName string) (csipb.NodeClient, io.Closer, error) { + nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) { nodeClient := fake.NewNodeClient(false /* stagingCapable */) nodeClient.SetNextError(tc.err) return nodeClient, fakeCloser, nil @@ -299,7 +315,7 @@ func TestClientNodeUnpublishVolume(t *testing.T) { fakeCloser := fake.NewCloser(t) client := &csiDriverClient{ driverName: "Fake Driver Name", - nodeClientCreator: func(driverName string) (csipb.NodeClient, io.Closer, error) { + nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) { nodeClient := fake.NewNodeClient(false /* stagingCapable */) nodeClient.SetNextError(tc.err) return nodeClient, fakeCloser, nil @@ -337,7 +353,7 @@ func TestClientNodeStageVolume(t *testing.T) { fakeCloser := fake.NewCloser(t) client := &csiDriverClient{ driverName: "Fake Driver Name", - nodeClientCreator: func(driverName string) 
(csipb.NodeClient, io.Closer, error) { + nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) { nodeClient := fake.NewNodeClient(false /* stagingCapable */) nodeClient.SetNextError(tc.err) return nodeClient, fakeCloser, nil @@ -381,7 +397,7 @@ func TestClientNodeUnstageVolume(t *testing.T) { fakeCloser := fake.NewCloser(t) client := &csiDriverClient{ driverName: "Fake Driver Name", - nodeClientCreator: func(driverName string) (csipb.NodeClient, io.Closer, error) { + nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) { nodeClient := fake.NewNodeClient(false /* stagingCapable */) nodeClient.SetNextError(tc.err) return nodeClient, fakeCloser, nil diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go index b0884112774..e5c02fad49c 100644 --- a/pkg/volume/csi/csi_mounter.go +++ b/pkg/volume/csi/csi_mounter.go @@ -55,18 +55,18 @@ var ( ) type csiMountMgr struct { - csiClient csiClient - k8s kubernetes.Interface - plugin *csiPlugin - driverName string - volumeID string - specVolumeID string - readOnly bool - spec *volume.Spec - pod *api.Pod - podUID types.UID - options volume.VolumeOptions - volumeInfo map[string]string + csiClient csiClient + k8s kubernetes.Interface + plugin *csiPlugin + driverName csiDriverName + volumeID string + specVolumeID string + readOnly bool + spec *volume.Spec + pod *api.Pod + podUID types.UID + options volume.VolumeOptions + publishContext map[string]string volume.MetricsNil } @@ -121,7 +121,7 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { // Check for STAGE_UNSTAGE_VOLUME set and populate deviceMountPath if so deviceMountPath := "" - stageUnstageSet, err := hasStageUnstageCapability(ctx, csi) + stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx) if err != nil { klog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err)) return err @@ -135,9 +135,9 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup 
*int64) error { } } // search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName - if c.volumeInfo == nil { + if c.publishContext == nil { nodeName := string(c.plugin.host.GetNodeName()) - c.volumeInfo, err = c.plugin.getPublishVolumeInfo(c.k8s, c.volumeID, c.driverName, nodeName) + c.publishContext, err = c.plugin.getPublishContext(c.k8s, c.volumeID, string(c.driverName), nodeName) if err != nil { return err } @@ -191,7 +191,7 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { deviceMountPath, dir, accessMode, - c.volumeInfo, + c.publishContext, attribs, nodePublishSecrets, fsType, @@ -239,7 +239,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) { return nil, errors.New("CSIDriver lister does not exist") } - csiDriver, err := c.plugin.csiDriverLister.Get(c.driverName) + csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName)) if err != nil { if apierrs.IsNotFound(err) { klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName)) diff --git a/pkg/volume/csi/csi_mounter_test.go b/pkg/volume/csi/csi_mounter_test.go index 1e86a210998..4a5d55f619c 100644 --- a/pkg/volume/csi/csi_mounter_test.go +++ b/pkg/volume/csi/csi_mounter_test.go @@ -75,6 +75,7 @@ func TestMounterGetPath(t *testing.T) { } for _, tc := range testCases { t.Logf("test case: %s", tc.name) + registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol) spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly) mounter, err := plug.NewMounter( @@ -161,6 +162,7 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) { }) } + registerFakePlugin(test.driver, "endpoint", []string{"1.0.0"}, t) pv := makeTestPV("test-pv", 10, test.driver, testVol) pv.Spec.CSI.VolumeAttributes = test.volumeContext pv.Spec.MountOptions = []string{"foo=bar", "baz=qux"} @@ -187,7 +189,7 @@ func MounterSetUpTests(t *testing.T, 
podInfoEnabled bool) { csiMounter := mounter.(*csiMountMgr) csiMounter.csiClient = setupClient(t, true) - attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName())) + attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName())) attachment := &storage.VolumeAttachment{ ObjectMeta: meta.ObjectMeta{ @@ -331,6 +333,7 @@ func TestMounterSetUpWithFSGroup(t *testing.T) { t.Logf("Running test %s", tc.name) volName := fmt.Sprintf("test-vol-%d", i) + registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) pv := makeTestPV("test-pv", 10, testDriver, volName) pv.Spec.AccessModes = tc.accessModes pvName := pv.GetName() @@ -357,7 +360,7 @@ func TestMounterSetUpWithFSGroup(t *testing.T) { csiMounter := mounter.(*csiMountMgr) csiMounter.csiClient = setupClient(t, true) - attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName())) + attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName())) attachment := makeTestAttachment(attachID, "test-node", pvName) _, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment) @@ -392,6 +395,7 @@ func TestMounterSetUpWithFSGroup(t *testing.T) { func TestUnmounterTeardown(t *testing.T) { plug, tmpDir := newTestPlugin(t, nil, nil) defer os.RemoveAll(tmpDir) + registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) pv := makeTestPV("test-pv", 10, testDriver, testVol) // save the data file prior to unmount diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index 271ca573aed..35f20dc6910 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "path" + "sort" "strings" "sync" "time" @@ -33,6 +34,7 @@ import ( apierrs "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + 
utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" @@ -61,6 +63,8 @@ const ( csiResyncPeriod = time.Minute ) +var deprecatedSocketDirVersions = []string{"0.1.0", "0.2.0", "0.3.0", "0.4.0"} + type csiPlugin struct { host volume.VolumeHost blockEnabled bool @@ -81,8 +85,9 @@ func ProbeVolumePlugins() []volume.VolumePlugin { var _ volume.VolumePlugin = &csiPlugin{} type csiDriver struct { - driverName string - driverEndpoint string + driverName string + driverEndpoint string + highestSupportedVersion *utilversion.Version } type csiDriversStore struct { @@ -107,17 +112,35 @@ var PluginHandler = &RegistrationHandler{} // ValidatePlugin is called by kubelet's plugin watcher upon detection // of a new registration socket opened by CSI Driver registrar side car. -func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error { - klog.Infof(log("Trying to register a new plugin with name: %s endpoint: %s versions: %s", - pluginName, endpoint, strings.Join(versions, ","))) +func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error { + klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s, foundInDeprecatedDir: %v", + pluginName, endpoint, strings.Join(versions, ","), foundInDeprecatedDir)) - return nil + if foundInDeprecatedDir { + // CSI 0.x drivers used /var/lib/kubelet/plugins as the socket dir. + // This was deprecated as the socket dir for kubelet drivers, in lieu of a dedicated dir /var/lib/kubelet/plugins_registry + // The deprecated dir will only be allowed for a whitelisted set of old versions. 
+ // CSI 1.x drivers should use the /var/lib/kubelet/plugins_registry + if !isDeprecatedSocketDirAllowed(versions) { + err := fmt.Errorf("socket for CSI driver %q versions %v was found in a deprecated dir. Drivers implementing CSI 1.x+ must use the new dir", pluginName, versions) + klog.Error(err) + return err + } + } + + _, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions) + return err } // RegisterPlugin is called when a plugin can be registered -func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string) error { +func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string) error { klog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint)) + highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, endpoint, versions) + if err != nil { + return err + } + func() { // Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key // all other CSI components will be able to get the actual socket of CSI drivers by its name. @@ -127,11 +150,15 @@ func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string) // updated in the rest of the function. csiDrivers.Lock() defer csiDrivers.Unlock() - csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint} + csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint, highestSupportedVersion: highestSupportedVersion} }() // Get node info from the driver. - csi := newCsiDriverClient(pluginName) + csi, err := newCsiDriverClient(csiDriverName(pluginName)) + if err != nil { + return err + } + // TODO (verult) retry with exponential backoff, possibly added in csi client library. 
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) defer cancel() @@ -159,6 +186,41 @@ func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string) return nil } +func (h *RegistrationHandler) validateVersions(callerName, pluginName string, endpoint string, versions []string) (*utilversion.Version, error) { + if len(versions) == 0 { + err := fmt.Errorf("%s for CSI driver %q failed. Plugin returned an empty list for supported versions", callerName, pluginName) + klog.Error(err) + return nil, err + } + + // Validate version + newDriverHighestVersion, err := highestSupportedVersion(versions) + if err != nil { + err := fmt.Errorf("%s for CSI driver %q failed. None of the versions specified %q are supported. err=%v", callerName, pluginName, versions, err) + klog.Error(err) + return nil, err + } + + // Check for existing drivers with the same name + var existingDriver csiDriver + driverExists := false + func() { + csiDrivers.RLock() + defer csiDrivers.RUnlock() + existingDriver, driverExists = csiDrivers.driversMap[pluginName] + }() + + if driverExists { + if !existingDriver.highestSupportedVersion.LessThan(newDriverHighestVersion) { + err := fmt.Errorf("%s for CSI driver %q failed. 
Another driver with the same name is already registered with a higher supported version: %q", callerName, pluginName, existingDriver.highestSupportedVersion) + klog.Error(err) + return nil, err + } + } + + return newDriverHighestVersion, nil +} + // DeRegisterPlugin is called when a plugin removed its socket, signaling // it is no longer available func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) { @@ -240,7 +302,10 @@ func (p *csiPlugin) NewMounter( return nil, errors.New("failed to get a Kubernetes client") } - csi := newCsiDriverClient(pvSource.Driver) + csi, err := newCsiDriverClient(csiDriverName(pvSource.Driver)) + if err != nil { + return nil, err + } mounter := &csiMountMgr{ plugin: p, @@ -248,7 +313,7 @@ func (p *csiPlugin) NewMounter( spec: spec, pod: pod, podUID: pod.UID, - driverName: pvSource.Driver, + driverName: csiDriverName(pvSource.Driver), volumeID: pvSource.VolumeHandle, specVolumeID: spec.Name(), csiClient: csi, @@ -307,9 +372,12 @@ func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmo klog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err)) return nil, err } - unmounter.driverName = data[volDataKey.driverName] + unmounter.driverName = csiDriverName(data[volDataKey.driverName]) unmounter.volumeID = data[volDataKey.volHandle] - unmounter.csiClient = newCsiDriverClient(unmounter.driverName) + unmounter.csiClient, err = newCsiDriverClient(unmounter.driverName) + if err != nil { + return nil, err + } return unmounter, nil } @@ -421,7 +489,10 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt } klog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver)) - client := newCsiDriverClient(pvSource.Driver) + client, err := newCsiDriverClient(csiDriverName(pvSource.Driver)) + if err != nil { + return nil, err + } k8s := p.host.GetKubeClient() if k8s == nil { @@ -434,7 +505,7 @@ func (p *csiPlugin) 
NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt k8s: k8s, plugin: p, volumeID: pvSource.VolumeHandle, - driverName: pvSource.Driver, + driverName: csiDriverName(pvSource.Driver), readOnly: readOnly, spec: spec, specName: spec.Name(), @@ -492,9 +563,12 @@ func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (vo klog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err)) return nil, err } - unmapper.driverName = data[volDataKey.driverName] + unmapper.driverName = csiDriverName(data[volDataKey.driverName]) unmapper.volumeID = data[volDataKey.volHandle] - unmapper.csiClient = newCsiDriverClient(unmapper.driverName) + unmapper.csiClient, err = newCsiDriverClient(unmapper.driverName) + if err != nil { + return nil, err + } return unmapper, nil } @@ -555,7 +629,7 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) { return false, nil } -func (p *csiPlugin) getPublishVolumeInfo(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) { +func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) { skip, err := p.skipAttach(driver) if err != nil { return nil, err @@ -593,3 +667,69 @@ func unregisterDriver(driverName string) error { return nil } + +// Return the highest supported version +func highestSupportedVersion(versions []string) (*utilversion.Version, error) { + if len(versions) == 0 { + return nil, fmt.Errorf("CSI driver reporting empty array for supported versions") + } + + // Sort by lowest to highest version + sort.Slice(versions, func(i, j int) bool { + parsedVersionI, err := utilversion.ParseGeneric(versions[i]) + if err != nil { + // Push bad values to the bottom + return true + } + + parsedVersionJ, err := utilversion.ParseGeneric(versions[j]) + if err != nil { + // Push bad values to the bottom + return false + } + + return parsedVersionI.LessThan(parsedVersionJ) + }) + + for i := len(versions) 
- 1; i >= 0; i-- { + highestSupportedVersion, err := utilversion.ParseGeneric(versions[i]) + if err != nil { + return nil, err + } + + if highestSupportedVersion.Major() <= 1 { + return highestSupportedVersion, nil + } + } + + return nil, fmt.Errorf("None of the CSI versions reported by this driver are supported") +} + +// Only drivers that implement CSI 0.x are allowed to use deprecated socket dir. +func isDeprecatedSocketDirAllowed(versions []string) bool { + for _, version := range versions { + if isV0Version(version) { + return true + } + } + + return false +} + +func isV0Version(version string) bool { + parsedVersion, err := utilversion.ParseGeneric(version) + if err != nil { + return false + } + + return parsedVersion.Major() == 0 +} + +func isV1Version(version string) bool { + parsedVersion, err := utilversion.ParseGeneric(version) + if err != nil { + return false + } + + return parsedVersion.Major() == 1 +} diff --git a/pkg/volume/csi/csi_plugin_test.go b/pkg/volume/csi/csi_plugin_test.go index c2fbafd95af..ab82fde6455 100644 --- a/pkg/volume/csi/csi_plugin_test.go +++ b/pkg/volume/csi/csi_plugin_test.go @@ -104,6 +104,16 @@ func makeTestPV(name string, sizeGig int, driverName, volID string) *api.Persist } } +func registerFakePlugin(pluginName, endpoint string, versions []string, t *testing.T) { + csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}} + highestSupportedVersions, err := highestSupportedVersion(versions) + if err != nil { + t.Fatalf("unexpected error parsing versions (%v) for pluginName % q endpoint %q: %#v", versions, pluginName, endpoint, err) + } + + csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint, highestSupportedVersion: highestSupportedVersions} +} + func TestPluginGetPluginName(t *testing.T) { defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)() @@ -133,6 +143,7 @@ func TestPluginGetVolumeName(t *testing.T) { for _, 
tc := range testCases { t.Logf("testing: %s", tc.name) + registerFakePlugin(tc.driverName, "endpoint", []string{"0.3.0"}, t) pv := makeTestPV("test-pv", 10, tc.driverName, tc.volName) spec := volume.NewSpecFromPersistentVolume(pv, false) name, err := plug.GetVolumeName(spec) @@ -151,6 +162,7 @@ func TestPluginCanSupport(t *testing.T) { plug, tmpDir := newTestPlugin(t, nil, nil) defer os.RemoveAll(tmpDir) + registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) pv := makeTestPV("test-pv", 10, testDriver, testVol) spec := volume.NewSpecFromPersistentVolume(pv, false) @@ -227,6 +239,7 @@ func TestPluginNewMounter(t *testing.T) { plug, tmpDir := newTestPlugin(t, nil, nil) defer os.RemoveAll(tmpDir) + registerFakePlugin(testDriver, "endpoint", []string{"1.2.0"}, t) pv := makeTestPV("test-pv", 10, testDriver, testVol) mounter, err := plug.NewMounter( volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly), @@ -243,7 +256,7 @@ func TestPluginNewMounter(t *testing.T) { csiMounter := mounter.(*csiMountMgr) // validate mounter fields - if csiMounter.driverName != testDriver { + if string(csiMounter.driverName) != testDriver { t.Error("mounter driver name not set") } if csiMounter.volumeID != testVol { @@ -277,6 +290,7 @@ func TestPluginNewUnmounter(t *testing.T) { plug, tmpDir := newTestPlugin(t, nil, nil) defer os.RemoveAll(tmpDir) + registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) pv := makeTestPV("test-pv", 10, testDriver, testVol) // save the data file to re-create client @@ -364,6 +378,7 @@ func TestPluginNewBlockMapper(t *testing.T) { plug, tmpDir := newTestPlugin(t, nil, nil) defer os.RemoveAll(tmpDir) + registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) pv := makeTestPV("test-block-pv", 10, testDriver, testVol) mounter, err := plug.NewBlockVolumeMapper( volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly), @@ -380,7 +395,7 @@ func TestPluginNewBlockMapper(t *testing.T) 
{ csiMapper := mounter.(*csiBlockMapper) // validate mounter fields - if csiMapper.driverName != testDriver { + if string(csiMapper.driverName) != testDriver { t.Error("CSI block mapper missing driver name") } if csiMapper.volumeID != testVol { @@ -411,6 +426,7 @@ func TestPluginNewUnmapper(t *testing.T) { plug, tmpDir := newTestPlugin(t, nil, nil) defer os.RemoveAll(tmpDir) + registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) pv := makeTestPV("test-pv", 10, testDriver, testVol) // save the data file to re-create client @@ -456,7 +472,7 @@ func TestPluginNewUnmapper(t *testing.T) { } // test loaded vol data - if csiUnmapper.driverName != testDriver { + if string(csiUnmapper.driverName) != testDriver { t.Error("unmapper driverName not set") } if csiUnmapper.volumeID != testVol { @@ -524,3 +540,433 @@ func TestPluginConstructBlockVolumeSpec(t *testing.T) { } } } + +func TestValidatePlugin(t *testing.T) { + testCases := []struct { + pluginName string + endpoint string + versions []string + foundInDeprecatedDir bool + shouldFail bool + }{ + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"v1.0.0"}, + foundInDeprecatedDir: false, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"0.3.0"}, + foundInDeprecatedDir: false, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"0.2.0"}, + foundInDeprecatedDir: false, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"v1.0.0"}, + foundInDeprecatedDir: true, + shouldFail: true, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"v0.3.0"}, + foundInDeprecatedDir: true, + shouldFail: false, + }, + { + 
pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"0.2.0"}, + foundInDeprecatedDir: true, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"0.2.0", "v0.3.0"}, + foundInDeprecatedDir: false, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"0.2.0", "v0.3.0"}, + foundInDeprecatedDir: true, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"0.2.0", "v1.0.0"}, + foundInDeprecatedDir: false, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"0.2.0", "v1.0.0"}, + foundInDeprecatedDir: true, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"0.2.0", "v1.2.3"}, + foundInDeprecatedDir: false, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"0.2.0", "v1.2.3"}, + foundInDeprecatedDir: true, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"v1.2.3", "v0.3.0"}, + foundInDeprecatedDir: false, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"v1.2.3", "v0.3.0"}, + foundInDeprecatedDir: true, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"v1.2.3", "v0.3.0", "2.0.1"}, + foundInDeprecatedDir: false, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: 
"/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"v1.2.3", "v0.3.0", "2.0.1"}, + foundInDeprecatedDir: true, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"v0.3.0", "2.0.1"}, + foundInDeprecatedDir: true, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"}, + foundInDeprecatedDir: false, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"}, + foundInDeprecatedDir: true, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"}, + foundInDeprecatedDir: false, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"}, + foundInDeprecatedDir: true, + shouldFail: false, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"4.9.12", "2.0.1"}, + foundInDeprecatedDir: false, + shouldFail: true, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"4.9.12", "2.0.1"}, + foundInDeprecatedDir: true, + shouldFail: true, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{}, + foundInDeprecatedDir: false, + shouldFail: true, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{}, + foundInDeprecatedDir: true, + shouldFail: true, + }, + { + pluginName: "test.plugin", + endpoint: 
"/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions: []string{"var", "boo", "foo"}, + foundInDeprecatedDir: false, + shouldFail: true, + }, + { + pluginName: "test.plugin", + endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions: []string{"var", "boo", "foo"}, + foundInDeprecatedDir: true, + shouldFail: true, + }, + } + + for _, tc := range testCases { + // Arrange & Act + err := PluginHandler.ValidatePlugin(tc.pluginName, tc.endpoint, tc.versions, tc.foundInDeprecatedDir) + + // Assert + if tc.shouldFail && err == nil { + t.Fatalf("expecting ValidatePlugin to fail, but got nil error for testcase: %#v", tc) + } + if !tc.shouldFail && err != nil { + t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err) + } + } +} + +func TestValidatePluginExistingDriver(t *testing.T) { + testCases := []struct { + pluginName1 string + endpoint1 string + versions1 []string + pluginName2 string + endpoint2 string + versions2 []string + foundInDeprecatedDir2 bool + shouldFail bool + }{ + { + pluginName1: "test.plugin", + endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions1: []string{"v1.0.0"}, + pluginName2: "test.plugin2", + endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions2: []string{"v1.0.0"}, + foundInDeprecatedDir2: false, + shouldFail: false, + }, + { + pluginName1: "test.plugin", + endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions1: []string{"v1.0.0"}, + pluginName2: "test.plugin2", + endpoint2: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions2: []string{"v1.0.0"}, + foundInDeprecatedDir2: true, + shouldFail: true, + }, + { + pluginName1: "test.plugin", + endpoint1: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions1: []string{"v1.0.0"}, + pluginName2: "test.plugin", + endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions2: []string{"v1.0.0"}, + foundInDeprecatedDir2: false, + shouldFail: true, + }, + { + 
pluginName1: "test.plugin", + endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions1: []string{"v1.0.0"}, + pluginName2: "test.plugin", + endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions2: []string{"v1.0.0"}, + foundInDeprecatedDir2: false, + shouldFail: true, + }, + { + pluginName1: "test.plugin", + endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions1: []string{"v1.0.0"}, + pluginName2: "test.plugin", + endpoint2: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions2: []string{"v1.0.0"}, + foundInDeprecatedDir2: true, + shouldFail: true, + }, + { + pluginName1: "test.plugin", + endpoint1: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions1: []string{"v0.3.0", "0.2.0"}, + pluginName2: "test.plugin", + endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock", + versions2: []string{"1.0.0"}, + foundInDeprecatedDir2: false, + shouldFail: false, + }, + { + pluginName1: "test.plugin", + endpoint1: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions1: []string{"v0.3.0", "0.2.0"}, + pluginName2: "test.plugin", + endpoint2: "/var/log/kubelet/plugins/myplugin/csi.sock", + versions2: []string{"1.0.0"}, + foundInDeprecatedDir2: true, + shouldFail: true, + }, + } + + for _, tc := range testCases { + // Arrange & Act + csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}} + highestSupportedVersions1, err := highestSupportedVersion(tc.versions1) + if err != nil { + t.Fatalf("unexpected error parsing version for testcase: %#v", tc) + } + + csiDrivers.driversMap[tc.pluginName1] = csiDriver{driverName: tc.pluginName1, driverEndpoint: tc.endpoint1, highestSupportedVersion: highestSupportedVersions1} + + // Arrange & Act + err = PluginHandler.ValidatePlugin(tc.pluginName2, tc.endpoint2, tc.versions2, tc.foundInDeprecatedDir2) + + // Assert + if tc.shouldFail && err == nil { + t.Fatalf("expecting ValidatePlugin to fail, but got nil error for testcase: %#v", tc) + } + if 
!tc.shouldFail && err != nil { + t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err) + } + } +} + +func TestHighestSupportedVersion(t *testing.T) { + testCases := []struct { + versions []string + expectedHighestSupportedVersion string + shouldFail bool + }{ + { + versions: []string{"v1.0.0"}, + expectedHighestSupportedVersion: "1.0.0", + shouldFail: false, + }, + { + versions: []string{"0.3.0"}, + expectedHighestSupportedVersion: "0.3.0", + shouldFail: false, + }, + { + versions: []string{"0.2.0"}, + expectedHighestSupportedVersion: "0.2.0", + shouldFail: false, + }, + { + versions: []string{"1.0.0"}, + expectedHighestSupportedVersion: "1.0.0", + shouldFail: false, + }, + { + versions: []string{"v0.3.0"}, + expectedHighestSupportedVersion: "0.3.0", + shouldFail: false, + }, + { + versions: []string{"0.2.0"}, + expectedHighestSupportedVersion: "0.2.0", + shouldFail: false, + }, + { + versions: []string{"0.2.0", "v0.3.0"}, + expectedHighestSupportedVersion: "0.3.0", + shouldFail: false, + }, + { + versions: []string{"0.2.0", "v1.0.0"}, + expectedHighestSupportedVersion: "1.0.0", + shouldFail: false, + }, + { + versions: []string{"0.2.0", "v1.2.3"}, + expectedHighestSupportedVersion: "1.2.3", + shouldFail: false, + }, + { + versions: []string{"v1.2.3", "v0.3.0"}, + expectedHighestSupportedVersion: "1.2.3", + shouldFail: false, + }, + { + versions: []string{"v1.2.3", "v0.3.0", "2.0.1"}, + expectedHighestSupportedVersion: "1.2.3", + shouldFail: false, + }, + { + versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"}, + expectedHighestSupportedVersion: "1.2.3", + shouldFail: false, + }, + { + versions: []string{"4.9.12", "2.0.1"}, + expectedHighestSupportedVersion: "", + shouldFail: true, + }, + { + versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"}, + expectedHighestSupportedVersion: "1.2.3", + shouldFail: false, + }, + { + versions: []string{}, + expectedHighestSupportedVersion: "", + shouldFail: true, + }, + { + versions: 
[]string{"var", "boo", "foo"}, + expectedHighestSupportedVersion: "", + shouldFail: true, + }, + } + + for _, tc := range testCases { + // Arrange & Act + actual, err := highestSupportedVersion(tc.versions) + + // Assert + if tc.shouldFail && err == nil { + t.Fatalf("expecting highestSupportedVersion to fail, but got nil error for testcase: %#v", tc) + } + if !tc.shouldFail && err != nil { + t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err) + } + if tc.expectedHighestSupportedVersion != "" { + result, err := actual.Compare(tc.expectedHighestSupportedVersion) + if err != nil { + t.Fatalf("comparison failed with %v for testcase %#v", err, tc) + } + if result != 0 { + t.Fatalf("expectedHighestSupportedVersion %v, but got %v for tc: %#v", tc.expectedHighestSupportedVersion, actual, tc) + } + } + } +} diff --git a/pkg/volume/csi/csiv0/BUILD b/pkg/volume/csi/csiv0/BUILD new file mode 100644 index 00000000000..6dcb1ab2291 --- /dev/null +++ b/pkg/volume/csi/csiv0/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["csi.pb.go"], + importpath = "k8s.io/kubernetes/pkg/volume/csi/csiv0", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes/wrappers:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/volume/csi/csiv0/csi.pb.go b/pkg/volume/csi/csiv0/csi.pb.go new file mode 100644 index 00000000000..174badd75a0 --- /dev/null +++ b/pkg/volume/csi/csiv0/csi.pb.go @@ -0,0 +1,5007 @@ +/* 
+Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// For backwards compatibility with CSI 0.x we carry a copy of the +// CSI 0.3 client. + +package csiv0 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PluginCapability_Service_Type int32 + +const ( + PluginCapability_Service_UNKNOWN PluginCapability_Service_Type = 0 + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins may wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. 
+ // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + PluginCapability_Service_CONTROLLER_SERVICE PluginCapability_Service_Type = 1 + // ACCESSIBILITY_CONSTRAINTS indicates that the volumes for this + // plugin may not be equally accessible by all nodes in the + // cluster. The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2 +) + +var PluginCapability_Service_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CONTROLLER_SERVICE", + 2: "ACCESSIBILITY_CONSTRAINTS", +} +var PluginCapability_Service_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CONTROLLER_SERVICE": 1, + "ACCESSIBILITY_CONSTRAINTS": 2, +} + +func (x PluginCapability_Service_Type) String() string { + return proto.EnumName(PluginCapability_Service_Type_name, int32(x)) +} +func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{4, 0, 0} +} + +type VolumeCapability_AccessMode_Mode int32 + +const ( + VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 + // Can only be published once as read/write on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 + // Can only be published once as readonly on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 + // Can be published as readonly at multiple nodes simultaneously. 
+ VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 + // Can be published as read/write at multiple nodes + // simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 +) + +var VolumeCapability_AccessMode_Mode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SINGLE_NODE_WRITER", + 2: "SINGLE_NODE_READER_ONLY", + 3: "MULTI_NODE_READER_ONLY", + 4: "MULTI_NODE_SINGLE_WRITER", + 5: "MULTI_NODE_MULTI_WRITER", +} +var VolumeCapability_AccessMode_Mode_value = map[string]int32{ + "UNKNOWN": 0, + "SINGLE_NODE_WRITER": 1, + "SINGLE_NODE_READER_ONLY": 2, + "MULTI_NODE_READER_ONLY": 3, + "MULTI_NODE_SINGLE_WRITER": 4, + "MULTI_NODE_MULTI_WRITER": 5, +} + +func (x VolumeCapability_AccessMode_Mode) String() string { + return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) +} +func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{10, 2, 0} +} + +type ControllerServiceCapability_RPC_Type int32 + +const ( + ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 + ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 + ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 + ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 + ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. 
+ ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT ControllerServiceCapability_RPC_Type = 5 + // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload + // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used + // with the snapshot_id as the filter to query whether the + // uploading process is complete or not. + ControllerServiceCapability_RPC_LIST_SNAPSHOTS ControllerServiceCapability_RPC_Type = 6 +) + +var ControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_VOLUME", + 2: "PUBLISH_UNPUBLISH_VOLUME", + 3: "LIST_VOLUMES", + 4: "GET_CAPACITY", + 5: "CREATE_DELETE_SNAPSHOT", + 6: "LIST_SNAPSHOTS", +} +var ControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_VOLUME": 1, + "PUBLISH_UNPUBLISH_VOLUME": 2, + "LIST_VOLUMES": 3, + "GET_CAPACITY": 4, + "CREATE_DELETE_SNAPSHOT": 5, + "LIST_SNAPSHOTS": 6, +} + +func (x ControllerServiceCapability_RPC_Type) String() string { + return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) +} +func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{29, 0, 0} +} + +type SnapshotStatus_Type int32 + +const ( + SnapshotStatus_UNKNOWN SnapshotStatus_Type = 0 + // A snapshot is ready for use. + SnapshotStatus_READY SnapshotStatus_Type = 1 + // A snapshot is cut and is now being uploaded. + // Some cloud providers and storage systems uploads the snapshot + // to the cloud after the snapshot is cut. During this phase, + // `thaw` can be done so the application can be running again if + // `freeze` was done before taking the snapshot. + SnapshotStatus_UPLOADING SnapshotStatus_Type = 2 + // An error occurred during the snapshot uploading process. 
+ // This error status is specific for uploading because + // `CreateSnaphot` is a blocking call before the snapshot is + // cut and therefore it SHOULD NOT come back with an error + // status when an error occurs. Instead a gRPC error code SHALL + // be returned by `CreateSnapshot` when an error occurs before + // a snapshot is cut. + SnapshotStatus_ERROR_UPLOADING SnapshotStatus_Type = 3 +) + +var SnapshotStatus_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "READY", + 2: "UPLOADING", + 3: "ERROR_UPLOADING", +} +var SnapshotStatus_Type_value = map[string]int32{ + "UNKNOWN": 0, + "READY": 1, + "UPLOADING": 2, + "ERROR_UPLOADING": 3, +} + +func (x SnapshotStatus_Type) String() string { + return proto.EnumName(SnapshotStatus_Type_name, int32(x)) +} +func (SnapshotStatus_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{33, 0} +} + +type NodeServiceCapability_RPC_Type int32 + +const ( + NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 + NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME NodeServiceCapability_RPC_Type = 1 +) + +var NodeServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STAGE_UNSTAGE_VOLUME", +} +var NodeServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STAGE_UNSTAGE_VOLUME": 1, +} + +func (x NodeServiceCapability_RPC_Type) String() string { + return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) +} +func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{50, 0, 0} +} + +type GetPluginInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } +func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoRequest) ProtoMessage() {} +func (*GetPluginInfoRequest) 
Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{0} +} +func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b) +} +func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic) +} +func (dst *GetPluginInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoRequest.Merge(dst, src) +} +func (m *GetPluginInfoRequest) XXX_Size() int { + return xxx_messageInfo_GetPluginInfoRequest.Size(m) +} +func (m *GetPluginInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginInfoRequest proto.InternalMessageInfo + +type GetPluginInfoResponse struct { + // The name MUST follow reverse domain name notation format + // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). + // It SHOULD include the plugin's host company name and the plugin + // name, to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), underscores (_), + // dots (.), and alphanumerics between. This field is REQUIRED. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // This field is REQUIRED. Value of this field is opaque to the CO. + VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion" json:"vendor_version,omitempty"` + // This field is OPTIONAL. Values are opaque to the CO. 
+ Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } +func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoResponse) ProtoMessage() {} +func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{1} +} +func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b) +} +func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic) +} +func (dst *GetPluginInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoResponse.Merge(dst, src) +} +func (m *GetPluginInfoResponse) XXX_Size() int { + return xxx_messageInfo_GetPluginInfoResponse.Size(m) +} +func (m *GetPluginInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginInfoResponse proto.InternalMessageInfo + +func (m *GetPluginInfoResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetPluginInfoResponse) GetVendorVersion() string { + if m != nil { + return m.VendorVersion + } + return "" +} + +func (m *GetPluginInfoResponse) GetManifest() map[string]string { + if m != nil { + return m.Manifest + } + return nil +} + +type GetPluginCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginCapabilitiesRequest) Reset() { *m = GetPluginCapabilitiesRequest{} } +func (m *GetPluginCapabilitiesRequest) String() 
string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesRequest) ProtoMessage() {} +func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{2} +} +func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b) +} +func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(dst, src) +} +func (m *GetPluginCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m) +} +func (m *GetPluginCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginCapabilitiesRequest proto.InternalMessageInfo + +type GetPluginCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
+ Capabilities []*PluginCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginCapabilitiesResponse) Reset() { *m = GetPluginCapabilitiesResponse{} } +func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesResponse) ProtoMessage() {} +func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{3} +} +func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b) +} +func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(dst, src) +} +func (m *GetPluginCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m) +} +func (m *GetPluginCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginCapabilitiesResponse proto.InternalMessageInfo + +func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the plugin. 
+type PluginCapability struct { + // Types that are valid to be assigned to Type: + // *PluginCapability_Service_ + Type isPluginCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability) Reset() { *m = PluginCapability{} } +func (m *PluginCapability) String() string { return proto.CompactTextString(m) } +func (*PluginCapability) ProtoMessage() {} +func (*PluginCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{4} +} +func (m *PluginCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability.Unmarshal(m, b) +} +func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic) +} +func (dst *PluginCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability.Merge(dst, src) +} +func (m *PluginCapability) XXX_Size() int { + return xxx_messageInfo_PluginCapability.Size(m) +} +func (m *PluginCapability) XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability proto.InternalMessageInfo + +type isPluginCapability_Type interface { + isPluginCapability_Type() +} + +type PluginCapability_Service_ struct { + Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,oneof"` +} + +func (*PluginCapability_Service_) isPluginCapability_Type() {} + +func (m *PluginCapability) GetType() isPluginCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *PluginCapability) GetService() *PluginCapability_Service { + if x, ok := m.GetType().(*PluginCapability_Service_); ok { + return x.Service + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*PluginCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PluginCapability_OneofMarshaler, _PluginCapability_OneofUnmarshaler, _PluginCapability_OneofSizer, []interface{}{ + (*PluginCapability_Service_)(nil), + } +} + +func _PluginCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PluginCapability) + // type + switch x := m.Type.(type) { + case *PluginCapability_Service_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PluginCapability.Type has unexpected type %T", x) + } + return nil +} + +func _PluginCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PluginCapability) + switch tag { + case 1: // type.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PluginCapability_Service) + err := b.DecodeMessage(msg) + m.Type = &PluginCapability_Service_{msg} + return true, err + default: + return false, nil + } +} + +func _PluginCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PluginCapability) + // type + switch x := m.Type.(type) { + case *PluginCapability_Service_: + s := proto.Size(x.Service) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PluginCapability_Service struct { + Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.PluginCapability_Service_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability_Service) Reset() { *m = PluginCapability_Service{} } +func (m 
*PluginCapability_Service) String() string { return proto.CompactTextString(m) } +func (*PluginCapability_Service) ProtoMessage() {} +func (*PluginCapability_Service) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{4, 0} +} +func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b) +} +func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic) +} +func (dst *PluginCapability_Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability_Service.Merge(dst, src) +} +func (m *PluginCapability_Service) XXX_Size() int { + return xxx_messageInfo_PluginCapability_Service.Size(m) +} +func (m *PluginCapability_Service) XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability_Service proto.InternalMessageInfo + +func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type { + if m != nil { + return m.Type + } + return PluginCapability_Service_UNKNOWN +} + +type ProbeRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbeRequest) Reset() { *m = ProbeRequest{} } +func (m *ProbeRequest) String() string { return proto.CompactTextString(m) } +func (*ProbeRequest) ProtoMessage() {} +func (*ProbeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{5} +} +func (m *ProbeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbeRequest.Unmarshal(m, b) +} +func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic) +} +func (dst *ProbeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeRequest.Merge(dst, src) +} +func (m 
*ProbeRequest) XXX_Size() int { + return xxx_messageInfo_ProbeRequest.Size(m) +} +func (m *ProbeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeRequest proto.InternalMessageInfo + +type ProbeResponse struct { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). 
+ Ready *wrappers.BoolValue `protobuf:"bytes,1,opt,name=ready" json:"ready,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbeResponse) Reset() { *m = ProbeResponse{} } +func (m *ProbeResponse) String() string { return proto.CompactTextString(m) } +func (*ProbeResponse) ProtoMessage() {} +func (*ProbeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{6} +} +func (m *ProbeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbeResponse.Unmarshal(m, b) +} +func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic) +} +func (dst *ProbeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeResponse.Merge(dst, src) +} +func (m *ProbeResponse) XXX_Size() int { + return xxx_messageInfo_ProbeResponse.Size(m) +} +func (m *ProbeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeResponse proto.InternalMessageInfo + +func (m *ProbeResponse) GetReady() *wrappers.BoolValue { + if m != nil { + return m.Ready + } + return nil +} + +type CreateVolumeRequest struct { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. If `CreateVolume` fails, the volume may or may not + // be provisioned. In this case, the CO may call `CreateVolume` + // again, with the same name, to ensure the volume exists. The + // Plugin should ensure that multiple `CreateVolume` calls for the + // same name do not result in more than one piece of storage + // provisioned corresponding to that name. If a Plugin is unable to + // enforce idempotency, the CO's error recovery logic could result + // in multiple (unused) volumes being provisioned. 
+ // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange" json:"capacity_range,omitempty"` + // The capabilities that the provisioned volume MUST have: the Plugin + // MUST provision a volume that could satisfy ALL of the + // capabilities specified in this list. The Plugin MUST assume that + // the CO MAY use the provisioned volume later with ANY of the + // capabilities specified in this list. This also enables the CO to do + // early validation: if ANY of the specified volume capabilities are + // not supported by the Plugin, the call SHALL fail. This field is + // REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Secrets required by plugin to complete volume creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + ControllerCreateSecrets map[string]string `protobuf:"bytes,5,rep,name=controller_create_secrets,json=controllerCreateSecrets" json:"controller_create_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // If specified, the new volume will be pre-populated with data from + // this source. 
This field is OPTIONAL. + VolumeContentSource *VolumeContentSource `protobuf:"bytes,6,opt,name=volume_content_source,json=volumeContentSource" json:"volume_content_source,omitempty"` + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume MUST be accessible from. + // An SP SHALL advertise the requirements for topological + // accessibility information in documentation. COs SHALL only specify + // topological accessibility information supported by the SP. + // This field is OPTIONAL. + // This field SHALL NOT be specified unless the SP has the + // ACCESSIBILITY_CONSTRAINTS plugin capability. + // If this field is not specified and the SP has the + // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose + // where the provisioned volume is accessible from. + AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,7,opt,name=accessibility_requirements,json=accessibilityRequirements" json:"accessibility_requirements,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } +func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeRequest) ProtoMessage() {} +func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{7} +} +func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b) +} +func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *CreateVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeRequest.Merge(dst, src) +} +func (m *CreateVolumeRequest) XXX_Size() int { + return xxx_messageInfo_CreateVolumeRequest.Size(m) +} +func (m *CreateVolumeRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_CreateVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateVolumeRequest proto.InternalMessageInfo + +func (m *CreateVolumeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *CreateVolumeRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *CreateVolumeRequest) GetControllerCreateSecrets() map[string]string { + if m != nil { + return m.ControllerCreateSecrets + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource { + if m != nil { + return m.VolumeContentSource + } + return nil +} + +func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement { + if m != nil { + return m.AccessibilityRequirements + } + return nil +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. 
+type VolumeContentSource struct { + // Types that are valid to be assigned to Type: + // *VolumeContentSource_Snapshot + Type isVolumeContentSource_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource) Reset() { *m = VolumeContentSource{} } +func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource) ProtoMessage() {} +func (*VolumeContentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{8} +} +func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b) +} +func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic) +} +func (dst *VolumeContentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource.Merge(dst, src) +} +func (m *VolumeContentSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource.Size(m) +} +func (m *VolumeContentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource proto.InternalMessageInfo + +type isVolumeContentSource_Type interface { + isVolumeContentSource_Type() +} + +type VolumeContentSource_Snapshot struct { + Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,oneof"` +} + +func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {} + +func (m *VolumeContentSource) GetType() isVolumeContentSource_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource { + if x, ok := m.GetType().(*VolumeContentSource_Snapshot); ok { + return x.Snapshot + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*VolumeContentSource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _VolumeContentSource_OneofMarshaler, _VolumeContentSource_OneofUnmarshaler, _VolumeContentSource_OneofSizer, []interface{}{ + (*VolumeContentSource_Snapshot)(nil), + } +} + +func _VolumeContentSource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*VolumeContentSource) + // type + switch x := m.Type.(type) { + case *VolumeContentSource_Snapshot: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Snapshot); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("VolumeContentSource.Type has unexpected type %T", x) + } + return nil +} + +func _VolumeContentSource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*VolumeContentSource) + switch tag { + case 1: // type.snapshot + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeContentSource_SnapshotSource) + err := b.DecodeMessage(msg) + m.Type = &VolumeContentSource_Snapshot{msg} + return true, err + default: + return false, nil + } +} + +func _VolumeContentSource_OneofSizer(msg proto.Message) (n int) { + m := msg.(*VolumeContentSource) + // type + switch x := m.Type.(type) { + case *VolumeContentSource_Snapshot: + s := proto.Size(x.Snapshot) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type VolumeContentSource_SnapshotSource struct { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. 
+ Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource_SnapshotSource) Reset() { *m = VolumeContentSource_SnapshotSource{} } +func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource_SnapshotSource) ProtoMessage() {} +func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{8, 0} +} +func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic) +} +func (dst *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(dst, src) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m) +} +func (m *VolumeContentSource_SnapshotSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource_SnapshotSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource_SnapshotSource proto.InternalMessageInfo + +func (m *VolumeContentSource_SnapshotSource) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type CreateVolumeResponse struct { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. 
+ Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } +func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeResponse) ProtoMessage() {} +func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{9} +} +func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b) +} +func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *CreateVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeResponse.Merge(dst, src) +} +func (m *CreateVolumeResponse) XXX_Size() int { + return xxx_messageInfo_CreateVolumeResponse.Size(m) +} +func (m *CreateVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateVolumeResponse proto.InternalMessageInfo + +func (m *CreateVolumeResponse) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +// Specify a capability of a volume. +type VolumeCapability struct { + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + // + // Types that are valid to be assigned to AccessType: + // *VolumeCapability_Block + // *VolumeCapability_Mount + AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` + // This is a REQUIRED field. 
+ AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode" json:"access_mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } +func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability) ProtoMessage() {} +func (*VolumeCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{10} +} +func (m *VolumeCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability.Unmarshal(m, b) +} +func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic) +} +func (dst *VolumeCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability.Merge(dst, src) +} +func (m *VolumeCapability) XXX_Size() int { + return xxx_messageInfo_VolumeCapability.Size(m) +} +func (m *VolumeCapability) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability proto.InternalMessageInfo + +type isVolumeCapability_AccessType interface { + isVolumeCapability_AccessType() +} + +type VolumeCapability_Block struct { + Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,oneof"` +} +type VolumeCapability_Mount struct { + Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,oneof"` +} + +func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} +func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} + +func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { + if m != nil { + return m.AccessType + } + return nil +} + +func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { + return x.Block + } + return nil +} 
+ +func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { + return x.Mount + } + return nil +} + +func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { + if m != nil { + return m.AccessMode + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*VolumeCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _VolumeCapability_OneofMarshaler, _VolumeCapability_OneofUnmarshaler, _VolumeCapability_OneofSizer, []interface{}{ + (*VolumeCapability_Block)(nil), + (*VolumeCapability_Mount)(nil), + } +} + +func _VolumeCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Block); err != nil { + return err + } + case *VolumeCapability_Mount: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mount); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("VolumeCapability.AccessType has unexpected type %T", x) + } + return nil +} + +func _VolumeCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*VolumeCapability) + switch tag { + case 1: // access_type.block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_BlockVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Block{msg} + return true, err + case 2: // access_type.mount + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_MountVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Mount{msg} + 
return true, err + default: + return false, nil + } +} + +func _VolumeCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + s := proto.Size(x.Block) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *VolumeCapability_Mount: + s := proto.Size(x.Mount) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Indicate that the volume will be accessed via the block device API. +type VolumeCapability_BlockVolume struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} } +func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_BlockVolume) ProtoMessage() {} +func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{10, 0} +} +func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b) +} +func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic) +} +func (dst *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_BlockVolume.Merge(dst, src) +} +func (m *VolumeCapability_BlockVolume) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m) +} +func (m *VolumeCapability_BlockVolume) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_BlockVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_BlockVolume proto.InternalMessageInfo + +// Indicate that the 
volume will be accessed via the filesystem API. +type VolumeCapability_MountVolume struct { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType" json:"fs_type,omitempty"` + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. + MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags" json:"mount_flags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} } +func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_MountVolume) ProtoMessage() {} +func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{10, 1} +} +func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b) +} +func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic) +} +func (dst *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_MountVolume.Merge(dst, src) +} +func (m *VolumeCapability_MountVolume) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_MountVolume.Size(m) +} +func (m *VolumeCapability_MountVolume) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_MountVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_MountVolume proto.InternalMessageInfo + +func (m 
*VolumeCapability_MountVolume) GetFsType() string { + if m != nil { + return m.FsType + } + return "" +} + +func (m *VolumeCapability_MountVolume) GetMountFlags() []string { + if m != nil { + return m.MountFlags + } + return nil +} + +// Specify how a volume can be accessed. +type VolumeCapability_AccessMode struct { + // This field is REQUIRED. + Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,enum=csi.v0.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} } +func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_AccessMode) ProtoMessage() {} +func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{10, 2} +} +func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b) +} +func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic) +} +func (dst *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_AccessMode.Merge(dst, src) +} +func (m *VolumeCapability_AccessMode) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_AccessMode.Size(m) +} +func (m *VolumeCapability_AccessMode) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_AccessMode.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_AccessMode proto.InternalMessageInfo + +func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode { + if m != nil { + return m.Mode + } + return VolumeCapability_AccessMode_UNKNOWN +} + +// The capacity of the storage space in bytes. 
To specify an exact size, +// `required_bytes` and `limit_bytes` SHALL be set to the same value. At +// least one of the these fields MUST be specified. +type CapacityRange struct { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + RequiredBytes int64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes" json:"required_bytes,omitempty"` + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + LimitBytes int64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes" json:"limit_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CapacityRange) Reset() { *m = CapacityRange{} } +func (m *CapacityRange) String() string { return proto.CompactTextString(m) } +func (*CapacityRange) ProtoMessage() {} +func (*CapacityRange) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{11} +} +func (m *CapacityRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CapacityRange.Unmarshal(m, b) +} +func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic) +} +func (dst *CapacityRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRange.Merge(dst, src) +} +func (m *CapacityRange) XXX_Size() int { + return xxx_messageInfo_CapacityRange.Size(m) +} +func (m *CapacityRange) XXX_DiscardUnknown() { + xxx_messageInfo_CapacityRange.DiscardUnknown(m) +} + +var xxx_messageInfo_CapacityRange proto.InternalMessageInfo + +func (m *CapacityRange) GetRequiredBytes() int64 { + if m != nil { + return m.RequiredBytes + } + return 0 +} + +func (m *CapacityRange) GetLimitBytes() int64 { + if m != nil { + return 
m.LimitBytes + } + return 0 +} + +// The information about a provisioned volume. +type Volume struct { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes" json:"capacity_bytes,omitempty"` + // Contains identity information for the created volume. This field is + // REQUIRED. The identity information will be used by the CO in + // subsequent calls to refer to the provisioned volume. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Attributes reflect static properties of a volume and MUST be passed + // to volume validation and publishing calls. + // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable + // and SHALL be safe for the CO to cache. Attributes SHOULD NOT + // contain sensitive information. Attributes MAY NOT uniquely identify + // a volume. A volume uniquely identified by `id` SHALL always report + // the same attributes. This field is OPTIONAL and when present MUST + // be passed to volume validation and publishing calls. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // If specified, indicates that the volume is not empty and is + // pre-populated with data from the specified source. + // This field is OPTIONAL. + ContentSource *VolumeContentSource `protobuf:"bytes,4,opt,name=content_source,json=contentSource" json:"content_source,omitempty"` + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume is accessible from. + // A plugin that returns this field MUST also set the + // ACCESSIBILITY_CONSTRAINTS plugin capability. 
+ // An SP MAY specify multiple topologies to indicate the volume is + // accessible from multiple locations. + // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // may schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". + AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Volume) Reset() { *m = Volume{} } +func (m *Volume) String() string { return proto.CompactTextString(m) } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{12} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Volume.Unmarshal(m, b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Volume.Marshal(b, m, deterministic) +} +func (dst *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(dst, src) +} +func (m *Volume) XXX_Size() int { + return xxx_messageInfo_Volume.Size(m) +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) +} + +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func 
(m *Volume) GetCapacityBytes() int64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +func (m *Volume) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Volume) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Volume) GetContentSource() *VolumeContentSource { + if m != nil { + return m.ContentSource + } + return nil +} + +func (m *Volume) GetAccessibleTopology() []*Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type TopologyRequirement struct { + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, than the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. If it is + // unable to do so, the SP MUST fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and both "zone" "Z2" and "zone" "Z3". 
+ // + // If xn, than the SP MUST make the provisioned volume available from + // all topologies from the list of requisite topologies and MAY choose + // the remaining x-n unique topologies from the list of all possible + // topologies. If it is unable to do so, the SP MUST fail the + // CreateVolume call. + // For example, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2" and the SP may select the second zone + // independently, e.g. "R1/Z4". + Requisite []*Topology `protobuf:"bytes,1,rep,name=requisite" json:"requisite,omitempty"` + // Specifies the list of topologies the CO would prefer the volume to + // be provisioned in. + // + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // An SP MUST attempt to make the provisioned volume available using + // the preferred topologies in order from first to last. + // + // If requisite is specified, all topologies in preferred list MUST + // also be present in the list of requisite topologies. + // + // If the SP is unable to to make the provisioned volume available + // from any of the preferred topologies, the SP MAY choose a topology + // from the list of requisite topologies. + // If the list of requisite topologies is not specified, then the SP + // MAY choose from the list of all possible topologies. + // If the list of requisite topologies is specified and the SP is + // unable to to make the provisioned volume available from any of the + // requisite topologies it MUST fail the CreateVolume call. 
+ // + // Example 1: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // preferred = + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // available from "zone" "Z3" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. + // + // Example 2: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from "zone" "Z4" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". + // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. 
If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + Preferred []*Topology `protobuf:"bytes,2,rep,name=preferred" json:"preferred,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopologyRequirement) Reset() { *m = TopologyRequirement{} } +func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) } +func (*TopologyRequirement) ProtoMessage() {} +func (*TopologyRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{13} +} +func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b) +} +func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic) +} +func (dst *TopologyRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyRequirement.Merge(dst, src) +} +func (m *TopologyRequirement) XXX_Size() int { + return xxx_messageInfo_TopologyRequirement.Size(m) +} +func (m *TopologyRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_TopologyRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologyRequirement proto.InternalMessageInfo + +func (m *TopologyRequirement) GetRequisite() []*Topology { + if m != nil { + return m.Requisite + } + return nil +} + +func (m *TopologyRequirement) GetPreferred() []*Topology { + if m != nil { + return m.Preferred + } + return nil +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. 
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an optional prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is required. The prefix is optional. +// Both the key name and the prefix MUST each be 63 characters or less, +// begin and end with an alphanumeric character ([a-z0-9A-Z]) and +// contain only dashes (-), underscores (_), dots (.), or alphanumerics +// in between, for example "zone". +// The key prefix MUST follow reverse domain name notation format +// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. 
+type Topology struct { + Segments map[string]string `protobuf:"bytes,1,rep,name=segments" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Topology) Reset() { *m = Topology{} } +func (m *Topology) String() string { return proto.CompactTextString(m) } +func (*Topology) ProtoMessage() {} +func (*Topology) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{14} +} +func (m *Topology) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Topology.Unmarshal(m, b) +} +func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Topology.Marshal(b, m, deterministic) +} +func (dst *Topology) XXX_Merge(src proto.Message) { + xxx_messageInfo_Topology.Merge(dst, src) +} +func (m *Topology) XXX_Size() int { + return xxx_messageInfo_Topology.Size(m) +} +func (m *Topology) XXX_DiscardUnknown() { + xxx_messageInfo_Topology.DiscardUnknown(m) +} + +var xxx_messageInfo_Topology proto.InternalMessageInfo + +func (m *Topology) GetSegments() map[string]string { + if m != nil { + return m.Segments + } + return nil +} + +type DeleteVolumeRequest struct { + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // Secrets required by plugin to complete volume deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ ControllerDeleteSecrets map[string]string `protobuf:"bytes,2,rep,name=controller_delete_secrets,json=controllerDeleteSecrets" json:"controller_delete_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } +func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeRequest) ProtoMessage() {} +func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{15} +} +func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b) +} +func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeRequest.Merge(dst, src) +} +func (m *DeleteVolumeRequest) XXX_Size() int { + return xxx_messageInfo_DeleteVolumeRequest.Size(m) +} +func (m *DeleteVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteVolumeRequest proto.InternalMessageInfo + +func (m *DeleteVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *DeleteVolumeRequest) GetControllerDeleteSecrets() map[string]string { + if m != nil { + return m.ControllerDeleteSecrets + } + return nil +} + +type DeleteVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } +func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeResponse) 
ProtoMessage() {} +func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{16} +} +func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b) +} +func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeResponse.Merge(dst, src) +} +func (m *DeleteVolumeResponse) XXX_Size() int { + return xxx_messageInfo_DeleteVolumeResponse.Size(m) +} +func (m *DeleteVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteVolumeResponse proto.InternalMessageInfo + +type ControllerPublishVolumeRequest struct { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetInfo`. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + Readonly bool `protobuf:"varint,4,opt,name=readonly" json:"readonly,omitempty"` + // Secrets required by plugin to complete controller publish volume + // request. This field is OPTIONAL. Refer to the + // `Secrets Requirements` section on how to use this field. 
+ ControllerPublishSecrets map[string]string `protobuf:"bytes,5,rep,name=controller_publish_secrets,json=controllerPublishSecrets" json:"controller_publish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Attributes of the volume to be used on a node. This field is + // OPTIONAL and MUST match the attributes of the Volume identified + // by `volume_id`. + VolumeAttributes map[string]string `protobuf:"bytes,6,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} } +func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeRequest) ProtoMessage() {} +func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{17} +} +func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b) +} +func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerPublishVolumeRequest.Merge(dst, src) +} +func (m *ControllerPublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m) +} +func (m *ControllerPublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerPublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerPublishVolumeRequest proto.InternalMessageInfo + +func (m *ControllerPublishVolumeRequest) GetVolumeId() 
string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerPublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *ControllerPublishVolumeRequest) GetControllerPublishSecrets() map[string]string { + if m != nil { + return m.ControllerPublishSecrets + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type ControllerPublishVolumeResponse struct { + // The SP specific information that will be passed to the Plugin in + // the subsequent `NodeStageVolume` or `NodePublishVolume` calls + // for the given volume. + // This information is opaque to the CO. This field is OPTIONAL. 
+ PublishInfo map[string]string `protobuf:"bytes,1,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} } +func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeResponse) ProtoMessage() {} +func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{18} +} +func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerPublishVolumeResponse.Merge(dst, src) +} +func (m *ControllerPublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m) +} +func (m *ControllerPublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerPublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerPublishVolumeResponse proto.InternalMessageInfo + +func (m *ControllerPublishVolumeResponse) GetPublishInfo() map[string]string { + if m != nil { + return m.PublishInfo + } + return nil +} + +type ControllerUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The ID of the node. This field is OPTIONAL. 
The CO SHOULD set this + // field to match the node ID returned by `NodeGetInfo` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // Secrets required by plugin to complete controller unpublish volume + // request. This SHOULD be the same secrets passed to the + // ControllerPublishVolume call for the specified volume. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + ControllerUnpublishSecrets map[string]string `protobuf:"bytes,3,rep,name=controller_unpublish_secrets,json=controllerUnpublishSecrets" json:"controller_unpublish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} } +func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} +func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{19} +} +func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(dst, src) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Size() int { + return 
xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m) +} +func (m *ControllerUnpublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerUnpublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerUnpublishVolumeRequest proto.InternalMessageInfo + +func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetControllerUnpublishSecrets() map[string]string { + if m != nil { + return m.ControllerUnpublishSecrets + } + return nil +} + +type ControllerUnpublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} } +func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} +func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{20} +} +func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(dst, src) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m) +} +func (m *ControllerUnpublishVolumeResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_ControllerUnpublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerUnpublishVolumeResponse proto.InternalMessageInfo + +type ValidateVolumeCapabilitiesRequest struct { + // The ID of the volume to check. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "supported" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // Attributes of the volume to check. This field is OPTIONAL and MUST + // match the attributes of the Volume identified by `volume_id`. + VolumeAttributes map[string]string `protobuf:"bytes,3,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Specifies where (regions, zones, racks, etc.) the caller believes + // the volume is accessible from. + // A caller MAY specify multiple topologies to indicate they believe + // the volume to be accessible from multiple locations. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. 
+ AccessibleTopology []*Topology `protobuf:"bytes,4,rep,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } +func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{21} +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(dst, src) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesRequest proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) 
GetAccessibleTopology() []*Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type ValidateVolumeCapabilitiesResponse struct { + // True if the Plugin supports the specified capabilities for the + // given volume. This field is REQUIRED. + Supported bool `protobuf:"varint,1,opt,name=supported" json:"supported,omitempty"` + // Message to the CO if `supported` above is false. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } +func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{22} +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(dst, src) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesResponse proto.InternalMessageInfo + +func (m 
*ValidateVolumeCapabilitiesResponse) GetSupported() bool { + if m != nil { + return m.Supported + } + return false +} + +func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +type ListVolumesRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+ StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } +func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } +func (*ListVolumesRequest) ProtoMessage() {} +func (*ListVolumesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{23} +} +func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b) +} +func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic) +} +func (dst *ListVolumesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesRequest.Merge(dst, src) +} +func (m *ListVolumesRequest) XXX_Size() int { + return xxx_messageInfo_ListVolumesRequest.Size(m) +} +func (m *ListVolumesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo + +func (m *ListVolumesRequest) GetMaxEntries() int32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListVolumesRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +type ListVolumesResponse struct { + Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+ NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } +func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse) ProtoMessage() {} +func (*ListVolumesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{24} +} +func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b) +} +func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic) +} +func (dst *ListVolumesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse.Merge(dst, src) +} +func (m *ListVolumesResponse) XXX_Size() int { + return xxx_messageInfo_ListVolumesResponse.Size(m) +} +func (m *ListVolumesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesResponse proto.InternalMessageInfo + +func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListVolumesResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListVolumesResponse_Entry struct { + Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} } +func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse_Entry) ProtoMessage() {} +func (*ListVolumesResponse_Entry) 
Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{24, 0} +} +func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b) +} +func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic) +} +func (dst *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse_Entry.Merge(dst, src) +} +func (m *ListVolumesResponse_Entry) XXX_Size() int { + return xxx_messageInfo_ListVolumesResponse_Entry.Size(m) +} +func (m *ListVolumesResponse_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesResponse_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesResponse_Entry proto.InternalMessageInfo + +func (m *ListVolumesResponse_Entry) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +type GetCapacityRequest struct { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,1,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. 
+ Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that in the specified + // `accessible_topology`. This is the same as the + // `accessible_topology` the CO returns in a `CreateVolumeResponse`. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } +func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } +func (*GetCapacityRequest) ProtoMessage() {} +func (*GetCapacityRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{25} +} +func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b) +} +func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic) +} +func (dst *GetCapacityRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityRequest.Merge(dst, src) +} +func (m *GetCapacityRequest) XXX_Size() int { + return xxx_messageInfo_GetCapacityRequest.Size(m) +} +func (m *GetCapacityRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCapacityRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCapacityRequest proto.InternalMessageInfo + +func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m 
*GetCapacityRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *GetCapacityRequest) GetAccessibleTopology() *Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type GetCapacityResponse struct { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. + AvailableCapacity int64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity" json:"available_capacity,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } +func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } +func (*GetCapacityResponse) ProtoMessage() {} +func (*GetCapacityResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{26} +} +func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b) +} +func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic) +} +func (dst *GetCapacityResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityResponse.Merge(dst, src) +} +func (m *GetCapacityResponse) XXX_Size() int { + return xxx_messageInfo_GetCapacityResponse.Size(m) +} +func (m *GetCapacityResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetCapacityResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCapacityResponse proto.InternalMessageInfo + +func (m *GetCapacityResponse) GetAvailableCapacity() int64 { + if m != 
nil { + return m.AvailableCapacity + } + return 0 +} + +type ControllerGetCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } +func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} +func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{27} +} +func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(dst, src) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m) +} +func (m *ControllerGetCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerGetCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerGetCapabilitiesRequest proto.InternalMessageInfo + +type ControllerGetCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
+ Capabilities []*ControllerServiceCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} } +func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} +func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{28} +} +func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b) +} +func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(dst, src) +} +func (m *ControllerGetCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m) +} +func (m *ControllerGetCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerGetCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerGetCapabilitiesResponse proto.InternalMessageInfo + +func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the controller service. 
+type ControllerServiceCapability struct { + // Types that are valid to be assigned to Type: + // *ControllerServiceCapability_Rpc + Type isControllerServiceCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} } +func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability) ProtoMessage() {} +func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{29} +} +func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b) +} +func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic) +} +func (dst *ControllerServiceCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerServiceCapability.Merge(dst, src) +} +func (m *ControllerServiceCapability) XXX_Size() int { + return xxx_messageInfo_ControllerServiceCapability.Size(m) +} +func (m *ControllerServiceCapability) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerServiceCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerServiceCapability proto.InternalMessageInfo + +type isControllerServiceCapability_Type interface { + isControllerServiceCapability_Type() +} + +type ControllerServiceCapability_Rpc struct { + Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` +} + +func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} + +func (m *ControllerServiceCapability) GetType() isControllerServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC 
{ + if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ControllerServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ControllerServiceCapability_OneofMarshaler, _ControllerServiceCapability_OneofUnmarshaler, _ControllerServiceCapability_OneofSizer, []interface{}{ + (*ControllerServiceCapability_Rpc)(nil), + } +} + +func _ControllerServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ControllerServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _ControllerServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ControllerServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ControllerServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &ControllerServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _ControllerServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ControllerServiceCapability_RPC struct { + Type 
ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} } +func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability_RPC) ProtoMessage() {} +func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{29, 0} +} +func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b) +} +func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic) +} +func (dst *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerServiceCapability_RPC.Merge(dst, src) +} +func (m *ControllerServiceCapability_RPC) XXX_Size() int { + return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m) +} +func (m *ControllerServiceCapability_RPC) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerServiceCapability_RPC.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerServiceCapability_RPC proto.InternalMessageInfo + +func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return ControllerServiceCapability_RPC_UNKNOWN +} + +type CreateSnapshotRequest struct { + // The ID of the source volume to be snapshotted. + // This field is REQUIRED. + SourceVolumeId string `protobuf:"bytes,1,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` + // The suggested name for the snapshot. This field is REQUIRED for + // idempotency. 
+ Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Secrets required by plugin to complete snapshot creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + CreateSnapshotSecrets map[string]string `protobuf:"bytes,3,rep,name=create_snapshot_secrets,json=createSnapshotSecrets" json:"create_snapshot_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + // Use cases for opaque parameters: + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. 
+ Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } +func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotRequest) ProtoMessage() {} +func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{30} +} +func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b) +} +func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic) +} +func (dst *CreateSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotRequest.Merge(dst, src) +} +func (m *CreateSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotRequest.Size(m) +} +func (m *CreateSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo + +func (m *CreateSnapshotRequest) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *CreateSnapshotRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateSnapshotRequest) GetCreateSnapshotSecrets() map[string]string { + if m != nil { + return m.CreateSnapshotSecrets + } + return nil +} + +func (m *CreateSnapshotRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type CreateSnapshotResponse struct { + // Contains all attributes of the newly created snapshot that are + // relevant to the CO along with 
information required by the Plugin + // to uniquely identify the snapshot. This field is REQUIRED. + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotResponse) Reset() { *m = CreateSnapshotResponse{} } +func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotResponse) ProtoMessage() {} +func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{31} +} +func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b) +} +func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic) +} +func (dst *CreateSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotResponse.Merge(dst, src) +} +func (m *CreateSnapshotResponse) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotResponse.Size(m) +} +func (m *CreateSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotResponse proto.InternalMessageInfo + +func (m *CreateSnapshotResponse) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +// The information about a provisioned snapshot. +type Snapshot struct { + // This is the complete size of the snapshot in bytes. The purpose of + // this field is to give CO guidance on how much space is needed to + // create a volume from this snapshot. The size of the volume MUST NOT + // be less than the size of the source snapshot. This field is + // OPTIONAL. If this field is not set, it indicates that this size is + // unknown. 
The value of this field MUST NOT be negative and a size of + // zero means it is unspecified. + SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes" json:"size_bytes,omitempty"` + // Uniquely identifies a snapshot and is generated by the plugin. It + // will not change over time. This field is REQUIRED. The identity + // information will be used by the CO in subsequent calls to refer to + // the provisioned snapshot. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Identity information for the source volume. Note that creating a + // snapshot from a snapshot is not supported here so the source has to + // be a volume. This field is REQUIRED. + SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` + // Timestamp when the point-in-time snapshot is taken on the storage + // system. The format of this field should be a Unix nanoseconds time + // encoded as an int64. On Unix, the command `date +%s%N` returns the + // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This + // field is REQUIRED. + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` + // The status of a snapshot. 
+ Status *SnapshotStatus `protobuf:"bytes,5,opt,name=status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{32} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Snapshot.Unmarshal(m, b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) +} +func (dst *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(dst, src) +} +func (m *Snapshot) XXX_Size() int { + return xxx_messageInfo_Snapshot.Size(m) +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetSizeBytes() int64 { + if m != nil { + return m.SizeBytes + } + return 0 +} + +func (m *Snapshot) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Snapshot) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *Snapshot) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *Snapshot) GetStatus() *SnapshotStatus { + if m != nil { + return m.Status + } + return nil +} + +// The status of a snapshot. +type SnapshotStatus struct { + // This field is REQUIRED. + Type SnapshotStatus_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.SnapshotStatus_Type" json:"type,omitempty"` + // Additional information to describe why a snapshot ended up in the + // `ERROR_UPLOADING` status. This field is OPTIONAL. 
+ Details string `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapshotStatus) Reset() { *m = SnapshotStatus{} } +func (m *SnapshotStatus) String() string { return proto.CompactTextString(m) } +func (*SnapshotStatus) ProtoMessage() {} +func (*SnapshotStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{33} +} +func (m *SnapshotStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SnapshotStatus.Unmarshal(m, b) +} +func (m *SnapshotStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SnapshotStatus.Marshal(b, m, deterministic) +} +func (dst *SnapshotStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotStatus.Merge(dst, src) +} +func (m *SnapshotStatus) XXX_Size() int { + return xxx_messageInfo_SnapshotStatus.Size(m) +} +func (m *SnapshotStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotStatus proto.InternalMessageInfo + +func (m *SnapshotStatus) GetType() SnapshotStatus_Type { + if m != nil { + return m.Type + } + return SnapshotStatus_UNKNOWN +} + +func (m *SnapshotStatus) GetDetails() string { + if m != nil { + return m.Details + } + return "" +} + +type DeleteSnapshotRequest struct { + // The ID of the snapshot to be deleted. + // This field is REQUIRED. + SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId" json:"snapshot_id,omitempty"` + // Secrets required by plugin to complete snapshot deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ DeleteSnapshotSecrets map[string]string `protobuf:"bytes,2,rep,name=delete_snapshot_secrets,json=deleteSnapshotSecrets" json:"delete_snapshot_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } +func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotRequest) ProtoMessage() {} +func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{34} +} +func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b) +} +func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotRequest.Merge(dst, src) +} +func (m *DeleteSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotRequest.Size(m) +} +func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo + +func (m *DeleteSnapshotRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *DeleteSnapshotRequest) GetDeleteSnapshotSecrets() map[string]string { + if m != nil { + return m.DeleteSnapshotSecrets + } + return nil +} + +type DeleteSnapshotResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotResponse) Reset() { *m = DeleteSnapshotResponse{} } +func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) 
} +func (*DeleteSnapshotResponse) ProtoMessage() {} +func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{35} +} +func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b) +} +func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotResponse.Merge(dst, src) +} +func (m *DeleteSnapshotResponse) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotResponse.Size(m) +} +func (m *DeleteSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotResponse proto.InternalMessageInfo + +// List all snapshots on the storage system regardless of how they were +// created. +type ListSnapshotsRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListSnapshots` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+ StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` + // Identity information for the source volume. This field is OPTIONAL. + // It can be used to list snapshots by volume. + SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` + // Identity information for a specific snapshot. This field is + // OPTIONAL. It can be used to list only a specific snapshot. + // ListSnapshots will return with current snapshot information + // and will not block if the snapshot is being uploaded. + SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId" json:"snapshot_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } +func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsRequest) ProtoMessage() {} +func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{36} +} +func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b) +} +func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsRequest.Merge(dst, src) +} +func (m *ListSnapshotsRequest) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsRequest.Size(m) +} +func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo + +func (m *ListSnapshotsRequest) GetMaxEntries() int32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + 
+func (m *ListSnapshotsRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +func (m *ListSnapshotsRequest) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *ListSnapshotsRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +type ListSnapshotsResponse struct { + Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListSnapshots` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListSnapshots` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } +func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse) ProtoMessage() {} +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{37} +} +func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse.Merge(dst, src) +} +func (m *ListSnapshotsResponse) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse.Size(m) +} +func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo + +func (m *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListSnapshotsResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListSnapshotsResponse_Entry struct { + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse_Entry) Reset() { *m = ListSnapshotsResponse_Entry{} } +func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse_Entry) ProtoMessage() {} +func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{37, 0} +} +func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(dst, src) +} +func (m *ListSnapshotsResponse_Entry) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m) +} +func (m *ListSnapshotsResponse_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse_Entry proto.InternalMessageInfo + +func (m *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type NodeStageVolumeRequest struct { + // The ID of the volume to publish. 
This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishInfo map[string]string `protobuf:"bytes,2,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure that there is only one + // staging_target_path per volume. + // This is a REQUIRED field. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Secrets required by plugin to complete node stage volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + NodeStageSecrets map[string]string `protobuf:"bytes,5,rep,name=node_stage_secrets,json=nodeStageSecrets" json:"node_stage_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Attributes of the volume to publish. This field is OPTIONAL and + // MUST match the attributes of the `Volume` identified by + // `volume_id`. 
+ VolumeAttributes map[string]string `protobuf:"bytes,6,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeStageVolumeRequest) Reset() { *m = NodeStageVolumeRequest{} } +func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeRequest) ProtoMessage() {} +func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{38} +} +func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b) +} +func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodeStageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeRequest.Merge(dst, src) +} +func (m *NodeStageVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeStageVolumeRequest.Size(m) +} +func (m *NodeStageVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStageVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStageVolumeRequest proto.InternalMessageInfo + +func (m *NodeStageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeStageVolumeRequest) GetPublishInfo() map[string]string { + if m != nil { + return m.PublishInfo + } + return nil +} + +func (m *NodeStageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodeStageVolumeRequest) GetNodeStageSecrets() 
map[string]string { + if m != nil { + return m.NodeStageSecrets + } + return nil +} + +func (m *NodeStageVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type NodeStageVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeStageVolumeResponse) Reset() { *m = NodeStageVolumeResponse{} } +func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeResponse) ProtoMessage() {} +func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{39} +} +func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b) +} +func (m *NodeStageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodeStageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeResponse.Merge(dst, src) +} +func (m *NodeStageVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeStageVolumeResponse.Size(m) +} +func (m *NodeStageVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStageVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStageVolumeResponse proto.InternalMessageInfo + +type NodeUnstageVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. 
+ StagingTargetPath string `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnstageVolumeRequest) Reset() { *m = NodeUnstageVolumeRequest{} } +func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeRequest) ProtoMessage() {} +func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{40} +} +func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b) +} +func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeRequest.Merge(dst, src) +} +func (m *NodeUnstageVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m) +} +func (m *NodeUnstageVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnstageVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnstageVolumeRequest proto.InternalMessageInfo + +func (m *NodeUnstageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +type NodeUnstageVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnstageVolumeResponse) Reset() { *m = NodeUnstageVolumeResponse{} } +func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeResponse) ProtoMessage() {} 
+func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{41} +} +func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b) +} +func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeResponse.Merge(dst, src) +} +func (m *NodeUnstageVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m) +} +func (m *NodeUnstageVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnstageVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnstageVolumeResponse proto.InternalMessageInfo + +type NodePublishVolumeRequest struct { + // The ID of the volume to publish. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishInfo map[string]string `protobuf:"bytes,2,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The path to which the device was mounted by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. 
+ StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the path exists, and that the process + // serving the request has `read` and `write` permissions to the path. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + Readonly bool `protobuf:"varint,6,opt,name=readonly" json:"readonly,omitempty"` + // Secrets required by plugin to complete node publish volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + NodePublishSecrets map[string]string `protobuf:"bytes,7,rep,name=node_publish_secrets,json=nodePublishSecrets" json:"node_publish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Attributes of the volume to publish. This field is OPTIONAL and + // MUST match the attributes of the Volume identified by + // `volume_id`. 
+ VolumeAttributes map[string]string `protobuf:"bytes,8,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } +func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeRequest) ProtoMessage() {} +func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{42} +} +func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b) +} +func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodePublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeRequest.Merge(dst, src) +} +func (m *NodePublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodePublishVolumeRequest.Size(m) +} +func (m *NodePublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodePublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePublishVolumeRequest proto.InternalMessageInfo + +func (m *NodePublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodePublishVolumeRequest) GetPublishInfo() map[string]string { + if m != nil { + return m.PublishInfo + } + return nil +} + +func (m *NodePublishVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) 
GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodePublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *NodePublishVolumeRequest) GetNodePublishSecrets() map[string]string { + if m != nil { + return m.NodePublishSecrets + } + return nil +} + +func (m *NodePublishVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type NodePublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } +func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeResponse) ProtoMessage() {} +func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{43} +} +func (m *NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b) +} +func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodePublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeResponse.Merge(dst, src) +} +func (m *NodePublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodePublishVolumeResponse.Size(m) +} +func (m *NodePublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodePublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePublishVolumeResponse proto.InternalMessageInfo + +type NodeUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } +func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeRequest) ProtoMessage() {} +func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{44} +} +func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b) +} +func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(dst, src) +} +func (m *NodeUnpublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m) +} +func (m *NodeUnpublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnpublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnpublishVolumeRequest proto.InternalMessageInfo + +func (m *NodeUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +type NodeUnpublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } +func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeResponse) ProtoMessage() {} +func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{45} +} +func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b) +} +func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(dst, src) +} +func (m *NodeUnpublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m) +} +func (m *NodeUnpublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnpublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnpublishVolumeResponse proto.InternalMessageInfo + +type NodeGetIdRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetIdRequest) Reset() { *m = NodeGetIdRequest{} } +func (m *NodeGetIdRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetIdRequest) ProtoMessage() {} +func (*NodeGetIdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{46} +} +func (m *NodeGetIdRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetIdRequest.Unmarshal(m, b) +} +func (m *NodeGetIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetIdRequest.Marshal(b, m, deterministic) +} +func (dst *NodeGetIdRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_NodeGetIdRequest.Merge(dst, src) +} +func (m *NodeGetIdRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetIdRequest.Size(m) +} +func (m *NodeGetIdRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetIdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetIdRequest proto.InternalMessageInfo + +type NodeGetIdResponse struct { + // The ID of the node as understood by the SP which SHALL be used by + // CO in subsequent `ControllerPublishVolume`. + // This is a REQUIRED field. + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetIdResponse) Reset() { *m = NodeGetIdResponse{} } +func (m *NodeGetIdResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetIdResponse) ProtoMessage() {} +func (*NodeGetIdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{47} +} +func (m *NodeGetIdResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetIdResponse.Unmarshal(m, b) +} +func (m *NodeGetIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetIdResponse.Marshal(b, m, deterministic) +} +func (dst *NodeGetIdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetIdResponse.Merge(dst, src) +} +func (m *NodeGetIdResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetIdResponse.Size(m) +} +func (m *NodeGetIdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetIdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetIdResponse proto.InternalMessageInfo + +func (m *NodeGetIdResponse) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +type NodeGetCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } +func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesRequest) ProtoMessage() {} +func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{48} +} +func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b) +} +func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(dst, src) +} +func (m *NodeGetCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m) +} +func (m *NodeGetCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetCapabilitiesRequest proto.InternalMessageInfo + +type NodeGetCapabilitiesResponse struct { + // All the capabilities that the node service supports. This field + // is OPTIONAL. 
+ Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } +func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesResponse) ProtoMessage() {} +func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{49} +} +func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b) +} +func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(dst, src) +} +func (m *NodeGetCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m) +} +func (m *NodeGetCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetCapabilitiesResponse proto.InternalMessageInfo + +func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the node service. 
+type NodeServiceCapability struct { + // Types that are valid to be assigned to Type: + // *NodeServiceCapability_Rpc + Type isNodeServiceCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } +func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability) ProtoMessage() {} +func (*NodeServiceCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{50} +} +func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b) +} +func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic) +} +func (dst *NodeServiceCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability.Merge(dst, src) +} +func (m *NodeServiceCapability) XXX_Size() int { + return xxx_messageInfo_NodeServiceCapability.Size(m) +} +func (m *NodeServiceCapability) XXX_DiscardUnknown() { + xxx_messageInfo_NodeServiceCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeServiceCapability proto.InternalMessageInfo + +type isNodeServiceCapability_Type interface { + isNodeServiceCapability_Type() +} + +type NodeServiceCapability_Rpc struct { + Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` +} + +func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} + +func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { + if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NodeServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NodeServiceCapability_OneofMarshaler, _NodeServiceCapability_OneofUnmarshaler, _NodeServiceCapability_OneofSizer, []interface{}{ + (*NodeServiceCapability_Rpc)(nil), + } +} + +func _NodeServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NodeServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _NodeServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NodeServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NodeServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &NodeServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _NodeServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NodeServiceCapability_RPC struct { + Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.NodeServiceCapability_RPC_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeServiceCapability_RPC) Reset() { *m = 
NodeServiceCapability_RPC{} } +func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability_RPC) ProtoMessage() {} +func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{50, 0} +} +func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b) +} +func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic) +} +func (dst *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability_RPC.Merge(dst, src) +} +func (m *NodeServiceCapability_RPC) XXX_Size() int { + return xxx_messageInfo_NodeServiceCapability_RPC.Size(m) +} +func (m *NodeServiceCapability_RPC) XXX_DiscardUnknown() { + xxx_messageInfo_NodeServiceCapability_RPC.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeServiceCapability_RPC proto.InternalMessageInfo + +func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return NodeServiceCapability_RPC_UNKNOWN +} + +type NodeGetInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetInfoRequest) Reset() { *m = NodeGetInfoRequest{} } +func (m *NodeGetInfoRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetInfoRequest) ProtoMessage() {} +func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{51} +} +func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b) +} +func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic) +} 
+func (dst *NodeGetInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoRequest.Merge(dst, src) +} +func (m *NodeGetInfoRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetInfoRequest.Size(m) +} +func (m *NodeGetInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetInfoRequest proto.InternalMessageInfo + +type NodeGetInfoResponse struct { + // The ID of the node as understood by the SP which SHALL be used by + // CO in subsequent calls to `ControllerPublishVolume`. + // This is a REQUIRED field. + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // Maximum number of volumes that controller can publish to the node. + // If value is not set or zero CO SHALL decide how many volumes of + // this type can be published by the controller to the node. The + // plugin MUST NOT set negative values here. + // This field is OPTIONAL. + MaxVolumesPerNode int64 `protobuf:"varint,2,opt,name=max_volumes_per_node,json=maxVolumesPerNode" json:"max_volumes_per_node,omitempty"` + // Specifies where (regions, zones, racks, etc.) the node is + // accessible from. + // A plugin that returns this field MUST also set the + // ACCESSIBILITY_CONSTRAINTS plugin capability. + // COs MAY use this information along with the topology information + // returned in CreateVolumeResponse to ensure that a given volume is + // accessible from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "R2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2". 
+ AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetInfoResponse) Reset() { *m = NodeGetInfoResponse{} } +func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetInfoResponse) ProtoMessage() {} +func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{52} +} +func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b) +} +func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic) +} +func (dst *NodeGetInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoResponse.Merge(dst, src) +} +func (m *NodeGetInfoResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetInfoResponse.Size(m) +} +func (m *NodeGetInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetInfoResponse proto.InternalMessageInfo + +func (m *NodeGetInfoResponse) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 { + if m != nil { + return m.MaxVolumesPerNode + } + return 0 +} + +func (m *NodeGetInfoResponse) GetAccessibleTopology() *Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +func init() { + proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v0.GetPluginInfoRequest") + proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v0.GetPluginInfoResponse") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.GetPluginInfoResponse.ManifestEntry") + proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), 
"csi.v0.GetPluginCapabilitiesRequest") + proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v0.GetPluginCapabilitiesResponse") + proto.RegisterType((*PluginCapability)(nil), "csi.v0.PluginCapability") + proto.RegisterType((*PluginCapability_Service)(nil), "csi.v0.PluginCapability.Service") + proto.RegisterType((*ProbeRequest)(nil), "csi.v0.ProbeRequest") + proto.RegisterType((*ProbeResponse)(nil), "csi.v0.ProbeResponse") + proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v0.CreateVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateVolumeRequest.ControllerCreateSecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateVolumeRequest.ParametersEntry") + proto.RegisterType((*VolumeContentSource)(nil), "csi.v0.VolumeContentSource") + proto.RegisterType((*VolumeContentSource_SnapshotSource)(nil), "csi.v0.VolumeContentSource.SnapshotSource") + proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v0.CreateVolumeResponse") + proto.RegisterType((*VolumeCapability)(nil), "csi.v0.VolumeCapability") + proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v0.VolumeCapability.BlockVolume") + proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v0.VolumeCapability.MountVolume") + proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v0.VolumeCapability.AccessMode") + proto.RegisterType((*CapacityRange)(nil), "csi.v0.CapacityRange") + proto.RegisterType((*Volume)(nil), "csi.v0.Volume") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.Volume.AttributesEntry") + proto.RegisterType((*TopologyRequirement)(nil), "csi.v0.TopologyRequirement") + proto.RegisterType((*Topology)(nil), "csi.v0.Topology") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.Topology.SegmentsEntry") + proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v0.DeleteVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.DeleteVolumeRequest.ControllerDeleteSecretsEntry") + 
proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v0.DeleteVolumeResponse") + proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v0.ControllerPublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeRequest.ControllerPublishSecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeRequest.VolumeAttributesEntry") + proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v0.ControllerPublishVolumeResponse") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeResponse.PublishInfoEntry") + proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v0.ControllerUnpublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerUnpublishVolumeRequest.ControllerUnpublishSecretsEntry") + proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v0.ControllerUnpublishVolumeResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v0.ValidateVolumeCapabilitiesRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.ValidateVolumeCapabilitiesRequest.VolumeAttributesEntry") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v0.ValidateVolumeCapabilitiesResponse") + proto.RegisterType((*ListVolumesRequest)(nil), "csi.v0.ListVolumesRequest") + proto.RegisterType((*ListVolumesResponse)(nil), "csi.v0.ListVolumesResponse") + proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v0.ListVolumesResponse.Entry") + proto.RegisterType((*GetCapacityRequest)(nil), "csi.v0.GetCapacityRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.GetCapacityRequest.ParametersEntry") + proto.RegisterType((*GetCapacityResponse)(nil), "csi.v0.GetCapacityResponse") + proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v0.ControllerGetCapabilitiesRequest") + proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), 
"csi.v0.ControllerGetCapabilitiesResponse") + proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v0.ControllerServiceCapability") + proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.v0.ControllerServiceCapability.RPC") + proto.RegisterType((*CreateSnapshotRequest)(nil), "csi.v0.CreateSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateSnapshotRequest.CreateSnapshotSecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateSnapshotRequest.ParametersEntry") + proto.RegisterType((*CreateSnapshotResponse)(nil), "csi.v0.CreateSnapshotResponse") + proto.RegisterType((*Snapshot)(nil), "csi.v0.Snapshot") + proto.RegisterType((*SnapshotStatus)(nil), "csi.v0.SnapshotStatus") + proto.RegisterType((*DeleteSnapshotRequest)(nil), "csi.v0.DeleteSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.DeleteSnapshotRequest.DeleteSnapshotSecretsEntry") + proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v0.DeleteSnapshotResponse") + proto.RegisterType((*ListSnapshotsRequest)(nil), "csi.v0.ListSnapshotsRequest") + proto.RegisterType((*ListSnapshotsResponse)(nil), "csi.v0.ListSnapshotsResponse") + proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v0.ListSnapshotsResponse.Entry") + proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v0.NodeStageVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.NodeStageSecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.PublishInfoEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.VolumeAttributesEntry") + proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v0.NodeStageVolumeResponse") + proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v0.NodeUnstageVolumeRequest") + proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v0.NodeUnstageVolumeResponse") + 
proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v0.NodePublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.NodePublishSecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.PublishInfoEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.VolumeAttributesEntry") + proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v0.NodePublishVolumeResponse") + proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v0.NodeUnpublishVolumeRequest") + proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v0.NodeUnpublishVolumeResponse") + proto.RegisterType((*NodeGetIdRequest)(nil), "csi.v0.NodeGetIdRequest") + proto.RegisterType((*NodeGetIdResponse)(nil), "csi.v0.NodeGetIdResponse") + proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v0.NodeGetCapabilitiesRequest") + proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v0.NodeGetCapabilitiesResponse") + proto.RegisterType((*NodeServiceCapability)(nil), "csi.v0.NodeServiceCapability") + proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v0.NodeServiceCapability.RPC") + proto.RegisterType((*NodeGetInfoRequest)(nil), "csi.v0.NodeGetInfoRequest") + proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v0.NodeGetInfoResponse") + proto.RegisterEnum("csi.v0.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value) + proto.RegisterEnum("csi.v0.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) + proto.RegisterEnum("csi.v0.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) + proto.RegisterEnum("csi.v0.SnapshotStatus_Type", SnapshotStatus_Type_name, SnapshotStatus_Type_value) + proto.RegisterEnum("csi.v0.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, 
NodeServiceCapability_RPC_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Identity service + +type IdentityClient interface { + GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) + GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) + Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) +} + +type identityClient struct { + cc *grpc.ClientConn +} + +func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { + return &identityClient{cc} +} + +func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { + out := new(GetPluginInfoResponse) + err := grpc.Invoke(ctx, "/csi.v0.Identity/GetPluginInfo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) { + out := new(GetPluginCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Identity/GetPluginCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) { + out := new(ProbeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Identity/Probe", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Identity service + +type IdentityServer interface { + GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) + GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) + Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) +} + +func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { + s.RegisterService(&_Identity_serviceDesc, srv) +} + +func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Identity/GetPluginInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Identity/GetPluginCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProbeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).Probe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Identity/Probe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Identity_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v0.Identity", + HandlerType: (*IdentityServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetPluginInfo", + Handler: _Identity_GetPluginInfo_Handler, + }, + { + MethodName: "GetPluginCapabilities", + Handler: _Identity_GetPluginCapabilities_Handler, + }, + { + MethodName: "Probe", + Handler: _Identity_Probe_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +// Client API for Controller service + +type ControllerClient interface { + CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) + DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) + ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) + GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) + ControllerGetCapabilities(ctx 
context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) + DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) + ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) +} + +type controllerClient struct { + cc *grpc.ClientConn +} + +func NewControllerClient(cc *grpc.ClientConn) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { + out := new(CreateVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/CreateVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { + out := new(DeleteVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/DeleteVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + out := new(ControllerPublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerPublishVolume", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + out := new(ControllerUnpublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerUnpublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { + out := new(ValidateVolumeCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ValidateVolumeCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { + out := new(ListVolumesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ListVolumes", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { + out := new(GetCapacityResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/GetCapacity", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { + out := new(ControllerGetCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerGetCapabilities", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { + out := new(CreateSnapshotResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/CreateSnapshot", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) { + out := new(DeleteSnapshotResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/DeleteSnapshot", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ListSnapshots", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Controller service + +type ControllerServer interface { + CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) + DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) + ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) + GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) + ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) + DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) + ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) +} + +func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { + s.RegisterService(&_Controller_serviceDesc, srv) +} + +func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/CreateVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) + } + return interceptor(ctx, 
in, info, handler) +} + +func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/DeleteVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerPublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ControllerPublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ControllerUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateVolumeCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ValidateVolumeCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVolumesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListVolumes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ListVolumes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCapacityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).GetCapacity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/GetCapacity", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ControllerGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/CreateSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/csi.v0.Controller/DeleteSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Controller_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v0.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateVolume", + Handler: _Controller_CreateVolume_Handler, + }, + { + MethodName: "DeleteVolume", + Handler: _Controller_DeleteVolume_Handler, + }, + { + MethodName: "ControllerPublishVolume", + Handler: _Controller_ControllerPublishVolume_Handler, + }, + { + MethodName: "ControllerUnpublishVolume", + Handler: _Controller_ControllerUnpublishVolume_Handler, + }, + { + MethodName: "ValidateVolumeCapabilities", + Handler: _Controller_ValidateVolumeCapabilities_Handler, + }, + { + MethodName: "ListVolumes", + Handler: _Controller_ListVolumes_Handler, + }, + { + MethodName: "GetCapacity", + Handler: _Controller_GetCapacity_Handler, + }, + { + MethodName: "ControllerGetCapabilities", + Handler: _Controller_ControllerGetCapabilities_Handler, + }, + { + MethodName: "CreateSnapshot", + Handler: _Controller_CreateSnapshot_Handler, + }, + { + MethodName: "DeleteSnapshot", + Handler: 
_Controller_DeleteSnapshot_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _Controller_ListSnapshots_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +// Client API for Node service + +type NodeClient interface { + NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) + // NodeGetId is being deprecated in favor of NodeGetInfo and will be + // removed in CSI 1.0. Existing drivers, however, may depend on this + // RPC call and hence this RPC call MUST be implemented by the CSI + // plugin prior to v1.0. + NodeGetId(ctx context.Context, in *NodeGetIdRequest, opts ...grpc.CallOption) (*NodeGetIdResponse, error) + NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) + // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and + // NodeGetInfo RPC calls. + NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) +} + +type nodeClient struct { + cc *grpc.ClientConn +} + +func NewNodeClient(cc *grpc.ClientConn) NodeClient { + return &nodeClient{cc} +} + +func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) { + out := new(NodeStageVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeStageVolume", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) { + out := new(NodeUnstageVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeUnstageVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { + out := new(NodePublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodePublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { + out := new(NodeUnpublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeUnpublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Deprecated: Do not use. +func (c *nodeClient) NodeGetId(ctx context.Context, in *NodeGetIdRequest, opts ...grpc.CallOption) (*NodeGetIdResponse, error) { + out := new(NodeGetIdResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetId", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { + out := new(NodeGetCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetCapabilities", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) { + out := new(NodeGetInfoResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetInfo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Node service + +type NodeServer interface { + NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) + // NodeGetId is being deprecated in favor of NodeGetInfo and will be + // removed in CSI 1.0. Existing drivers, however, may depend on this + // RPC call and hence this RPC call MUST be implemented by the CSI + // plugin prior to v1.0. + NodeGetId(context.Context, *NodeGetIdRequest) (*NodeGetIdResponse, error) + NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) + // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and + // NodeGetInfo RPC calls. 
+ NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) +} + +func RegisterNodeServer(s *grpc.Server, srv NodeServer) { + s.RegisterService(&_Node_serviceDesc, srv) +} + +func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeStageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeStageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeStageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnstageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnstageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeUnstageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodePublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodePublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodePublishVolume", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetId(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeGetId", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetId(ctx, req.(*NodeGetIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeGetInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Node_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v0.Node", + HandlerType: (*NodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NodeStageVolume", + Handler: _Node_NodeStageVolume_Handler, + }, + { + MethodName: "NodeUnstageVolume", + Handler: _Node_NodeUnstageVolume_Handler, + }, + { + MethodName: "NodePublishVolume", + Handler: _Node_NodePublishVolume_Handler, + }, + { + MethodName: "NodeUnpublishVolume", + Handler: _Node_NodeUnpublishVolume_Handler, + }, + { + MethodName: "NodeGetId", + Handler: _Node_NodeGetId_Handler, + }, + { + MethodName: "NodeGetCapabilities", + Handler: _Node_NodeGetCapabilities_Handler, + }, + { + MethodName: "NodeGetInfo", + Handler: _Node_NodeGetInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +func init() { proto.RegisterFile("csi.proto", fileDescriptor_csi_31237507707d37ec) } + +var fileDescriptor_csi_31237507707d37ec = []byte{ + // 2932 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4d, 0x73, 0x23, 0x47, + 0xd5, 0xa3, 0x0f, 0xdb, 0x7a, 0x5e, 0x3b, 0xda, 0xf6, 0x97, 0x3c, 0xb6, 0x77, 0xbd, 0xb3, 0xd9, + 0x64, 0x13, 0x12, 0x6d, 0x30, 0x24, 0x15, 0x92, 0x4d, 0x40, 
0x96, 0x15, 0x5b, 0x59, 0x5b, 0x36, + 0x23, 0xd9, 0xa9, 0x5d, 0x42, 0x4d, 0xc6, 0x52, 0x5b, 0x3b, 0xac, 0x3c, 0xa3, 0xcc, 0x8c, 0xcc, + 0x9a, 0x1b, 0x70, 0x01, 0x4e, 0xf0, 0x0b, 0x52, 0x95, 0x1b, 0x14, 0xb9, 0x50, 0xdc, 0xa8, 0xe2, + 0x46, 0x15, 0x27, 0xce, 0x9c, 0xb8, 0xa7, 0xe0, 0xc8, 0x89, 0x2a, 0xaa, 0xa8, 0x9e, 0xee, 0x19, + 0x4d, 0xb7, 0x7a, 0xf4, 0x91, 0xdd, 0x4a, 0x71, 0x92, 0xe6, 0x7d, 0xf5, 0xeb, 0xd7, 0xef, 0xbd, + 0x7e, 0xef, 0xcd, 0x40, 0xae, 0xe9, 0x59, 0xc5, 0xae, 0xeb, 0xf8, 0x0e, 0x9a, 0x26, 0x7f, 0x2f, + 0xdf, 0x50, 0x6f, 0xb4, 0x1d, 0xa7, 0xdd, 0xc1, 0xf7, 0x02, 0xe8, 0x59, 0xef, 0xfc, 0xde, 0x8f, + 0x5d, 0xb3, 0xdb, 0xc5, 0xae, 0x47, 0xe9, 0xb4, 0x15, 0x58, 0xda, 0xc3, 0xfe, 0x71, 0xa7, 0xd7, + 0xb6, 0xec, 0xaa, 0x7d, 0xee, 0xe8, 0xf8, 0xd3, 0x1e, 0xf6, 0x7c, 0xed, 0xef, 0x0a, 0x2c, 0x0b, + 0x08, 0xaf, 0xeb, 0xd8, 0x1e, 0x46, 0x08, 0x32, 0xb6, 0x79, 0x81, 0x0b, 0xca, 0x96, 0x72, 0x37, + 0xa7, 0x07, 0xff, 0xd1, 0x1d, 0x58, 0xb8, 0xc4, 0x76, 0xcb, 0x71, 0x8d, 0x4b, 0xec, 0x7a, 0x96, + 0x63, 0x17, 0x52, 0x01, 0x76, 0x9e, 0x42, 0x4f, 0x29, 0x10, 0xed, 0xc1, 0xec, 0x85, 0x69, 0x5b, + 0xe7, 0xd8, 0xf3, 0x0b, 0xe9, 0xad, 0xf4, 0xdd, 0xb9, 0xed, 0x6f, 0x14, 0xa9, 0x9e, 0x45, 0xe9, + 0x5a, 0xc5, 0x43, 0x46, 0x5d, 0xb1, 0x7d, 0xf7, 0x4a, 0x8f, 0x98, 0xd5, 0x77, 0x61, 0x9e, 0x43, + 0xa1, 0x3c, 0xa4, 0x9f, 0xe0, 0x2b, 0xa6, 0x13, 0xf9, 0x8b, 0x96, 0x20, 0x7b, 0x69, 0x76, 0x7a, + 0x98, 0x69, 0x42, 0x1f, 0xde, 0x49, 0xbd, 0xad, 0x68, 0x37, 0x60, 0x23, 0x5a, 0xad, 0x6c, 0x76, + 0xcd, 0x33, 0xab, 0x63, 0xf9, 0x16, 0xf6, 0xc2, 0xad, 0xff, 0x10, 0x36, 0x13, 0xf0, 0xcc, 0x02, + 0xf7, 0xe1, 0x5a, 0x33, 0x06, 0x2f, 0xa4, 0x82, 0xad, 0x14, 0xc2, 0xad, 0x08, 0x9c, 0x57, 0x3a, + 0x47, 0xad, 0xfd, 0x53, 0x81, 0xbc, 0x48, 0x82, 0xee, 0xc3, 0x8c, 0x87, 0xdd, 0x4b, 0xab, 0x49, + 0xed, 0x3a, 0xb7, 0xbd, 0x95, 0x24, 0xad, 0x58, 0xa7, 0x74, 0xfb, 0x53, 0x7a, 0xc8, 0xa2, 0xfe, + 0x5a, 0x81, 0x19, 0x06, 0x46, 0xdf, 0x81, 0x8c, 0x7f, 0xd5, 0xa5, 0x62, 0x16, 0xb6, 0xef, 0x8c, + 
0x12, 0x53, 0x6c, 0x5c, 0x75, 0xb1, 0x1e, 0xb0, 0x68, 0x1f, 0x42, 0x86, 0x3c, 0xa1, 0x39, 0x98, + 0x39, 0xa9, 0x3d, 0xa8, 0x1d, 0x7d, 0x54, 0xcb, 0x4f, 0xa1, 0x15, 0x40, 0xe5, 0xa3, 0x5a, 0x43, + 0x3f, 0x3a, 0x38, 0xa8, 0xe8, 0x46, 0xbd, 0xa2, 0x9f, 0x56, 0xcb, 0x95, 0xbc, 0x82, 0x36, 0x61, + 0xad, 0x54, 0x2e, 0x57, 0xea, 0xf5, 0xea, 0x4e, 0xf5, 0xa0, 0xda, 0x78, 0x68, 0x94, 0x8f, 0x6a, + 0xf5, 0x86, 0x5e, 0xaa, 0xd6, 0x1a, 0xf5, 0x7c, 0x6a, 0x67, 0x9a, 0xaa, 0xa1, 0x2d, 0xc0, 0xb5, + 0x63, 0xd7, 0x39, 0xc3, 0xa1, 0x71, 0x4b, 0x30, 0xcf, 0x9e, 0x99, 0x31, 0xdf, 0x80, 0xac, 0x8b, + 0xcd, 0xd6, 0x15, 0xdb, 0xb7, 0x5a, 0xa4, 0x0e, 0x5b, 0x0c, 0x1d, 0xb6, 0xb8, 0xe3, 0x38, 0x9d, + 0x53, 0x72, 0x78, 0x3a, 0x25, 0xd4, 0xbe, 0xc8, 0xc2, 0x62, 0xd9, 0xc5, 0xa6, 0x8f, 0x4f, 0x9d, + 0x4e, 0xef, 0x22, 0x14, 0x2d, 0x75, 0xcc, 0xfb, 0xb0, 0x40, 0x8c, 0xdf, 0xb4, 0xfc, 0x2b, 0xc3, + 0x35, 0xed, 0x36, 0x75, 0x87, 0xb9, 0xed, 0xe5, 0xd0, 0x2e, 0x65, 0x86, 0xd5, 0x09, 0x52, 0x9f, + 0x6f, 0xc6, 0x1f, 0x51, 0x15, 0x16, 0x2f, 0x83, 0x25, 0x0c, 0xee, 0xbc, 0xd3, 0xfc, 0x79, 0x53, + 0x2d, 0x62, 0xe7, 0x8d, 0x2e, 0x79, 0x88, 0x85, 0x3d, 0xf4, 0x00, 0xa0, 0x6b, 0xba, 0xe6, 0x05, + 0xf6, 0xb1, 0xeb, 0x15, 0x32, 0xbc, 0xf3, 0x4b, 0x76, 0x53, 0x3c, 0x8e, 0xa8, 0xa9, 0xf3, 0xc7, + 0xd8, 0x91, 0x0f, 0x6b, 0x4d, 0xc7, 0xf6, 0x5d, 0xa7, 0xd3, 0xc1, 0xae, 0xd1, 0x0c, 0xb8, 0x0d, + 0x0f, 0x37, 0x5d, 0xec, 0x7b, 0x85, 0x6c, 0x20, 0xfb, 0xed, 0x61, 0xb2, 0xcb, 0x11, 0x33, 0xc5, + 0xd6, 0x29, 0x2b, 0x5d, 0x68, 0xb5, 0x29, 0xc7, 0xa2, 0x23, 0x58, 0x0e, 0xad, 0xe1, 0xd8, 0x3e, + 0xb6, 0x7d, 0xc3, 0x73, 0x7a, 0x6e, 0x13, 0x17, 0xa6, 0x03, 0x93, 0xae, 0x0b, 0xf6, 0xa0, 0x34, + 0xf5, 0x80, 0x44, 0x67, 0x76, 0xe4, 0x80, 0xe8, 0x11, 0xa8, 0x66, 0xb3, 0x89, 0x3d, 0xcf, 0xa2, + 0x86, 0x33, 0x5c, 0xfc, 0x69, 0xcf, 0x72, 0xf1, 0x05, 0xb6, 0x7d, 0xaf, 0x30, 0xc3, 0x4b, 0x6d, + 0x38, 0x5d, 0xa7, 0xe3, 0xb4, 0xaf, 0xf4, 0x3e, 0x8d, 0xbe, 0xc6, 0xb1, 0xc7, 0x30, 0x9e, 0xfa, + 0x1e, 0xbc, 0x20, 0x58, 0x70, 0x92, 
0x1c, 0xa1, 0x7e, 0x08, 0x1b, 0xc3, 0x8c, 0x34, 0x51, 0xbe, + 0xf9, 0xa5, 0x02, 0x8b, 0x12, 0x9b, 0xa0, 0x7d, 0x98, 0xf5, 0x6c, 0xb3, 0xeb, 0x3d, 0x76, 0x7c, + 0xe6, 0xfc, 0xaf, 0x0e, 0x31, 0x61, 0xb1, 0xce, 0x68, 0xe9, 0xe3, 0xfe, 0x94, 0x1e, 0x71, 0xab, + 0x5b, 0xb0, 0xc0, 0x63, 0xd1, 0x02, 0xa4, 0xac, 0x16, 0x53, 0x2f, 0x65, 0xb5, 0xa2, 0x70, 0x7c, + 0x1f, 0x96, 0x78, 0x87, 0x60, 0x51, 0xf8, 0x12, 0x4c, 0xd3, 0x13, 0x62, 0x9a, 0x2c, 0xf0, 0x9a, + 0xe8, 0x0c, 0xab, 0xfd, 0x2e, 0x03, 0x79, 0xd1, 0xdf, 0xd1, 0x7d, 0xc8, 0x9e, 0x75, 0x9c, 0xe6, + 0x13, 0xc6, 0xfb, 0x62, 0x52, 0x60, 0x14, 0x77, 0x08, 0x15, 0x85, 0xee, 0x4f, 0xe9, 0x94, 0x89, + 0x70, 0x5f, 0x38, 0x3d, 0xdb, 0x67, 0x91, 0x99, 0xcc, 0x7d, 0x48, 0xa8, 0xfa, 0xdc, 0x01, 0x13, + 0xda, 0x85, 0x39, 0xea, 0x04, 0xc6, 0x85, 0xd3, 0xc2, 0x85, 0x74, 0x20, 0xe3, 0x76, 0xa2, 0x8c, + 0x52, 0x40, 0x7b, 0xe8, 0xb4, 0xb0, 0x0e, 0x66, 0xf4, 0x5f, 0x9d, 0x87, 0xb9, 0x98, 0x6e, 0xea, + 0x1e, 0xcc, 0xc5, 0x16, 0x43, 0xab, 0x30, 0x73, 0xee, 0x19, 0x51, 0x56, 0xcd, 0xe9, 0xd3, 0xe7, + 0x5e, 0x90, 0x28, 0x6f, 0xc2, 0x5c, 0xa0, 0x85, 0x71, 0xde, 0x31, 0xdb, 0xf4, 0x1e, 0xc8, 0xe9, + 0x10, 0x80, 0x3e, 0x20, 0x10, 0xf5, 0x5f, 0x0a, 0x40, 0x7f, 0x49, 0x74, 0x1f, 0x32, 0x81, 0x96, + 0x34, 0x37, 0xdf, 0x1d, 0x43, 0xcb, 0x62, 0xa0, 0x6a, 0xc0, 0xa5, 0x7d, 0xa6, 0x40, 0x26, 0x10, + 0x23, 0xe6, 0xe7, 0x7a, 0xb5, 0xb6, 0x77, 0x50, 0x31, 0x6a, 0x47, 0xbb, 0x15, 0xe3, 0x23, 0xbd, + 0xda, 0xa8, 0xe8, 0x79, 0x05, 0xad, 0xc3, 0x6a, 0x1c, 0xae, 0x57, 0x4a, 0xbb, 0x15, 0xdd, 0x38, + 0xaa, 0x1d, 0x3c, 0xcc, 0xa7, 0x90, 0x0a, 0x2b, 0x87, 0x27, 0x07, 0x8d, 0xea, 0x20, 0x2e, 0x8d, + 0x36, 0xa0, 0x10, 0xc3, 0x31, 0x19, 0x4c, 0x6c, 0x86, 0x88, 0x8d, 0x61, 0xe9, 0x5f, 0x86, 0xcc, + 0xee, 0xcc, 0x47, 0x87, 0x11, 0x38, 0xdb, 0x47, 0x30, 0xcf, 0xa5, 0x57, 0x52, 0x26, 0xb0, 0x10, + 0x6f, 0x19, 0x67, 0x57, 0x3e, 0xf6, 0x02, 0x4b, 0xa4, 0xf5, 0xf9, 0x10, 0xba, 0x43, 0x80, 0xc4, + 0xac, 0x1d, 0xeb, 0xc2, 0xf2, 0x19, 0x4d, 0x2a, 0xa0, 0x81, 0x00, 0x14, 
0x10, 0x68, 0x7f, 0x49, + 0xc1, 0x34, 0x3b, 0x9b, 0x3b, 0xb1, 0x04, 0xcf, 0x89, 0x0c, 0xa1, 0x54, 0x24, 0x8d, 0x87, 0x54, + 0x18, 0x0f, 0xe8, 0x7d, 0x00, 0xd3, 0xf7, 0x5d, 0xeb, 0xac, 0xe7, 0x47, 0x09, 0xfd, 0x06, 0x7f, + 0x1e, 0xc5, 0x52, 0x44, 0xc0, 0x32, 0x70, 0x9f, 0x03, 0xed, 0xc0, 0x82, 0x90, 0x04, 0x33, 0xa3, + 0x93, 0xe0, 0x7c, 0x93, 0x8b, 0xff, 0x12, 0x2c, 0x86, 0xf9, 0xab, 0x83, 0x0d, 0x9f, 0xe5, 0x37, + 0x96, 0xbf, 0xf3, 0x03, 0x79, 0x0f, 0xf5, 0x89, 0x43, 0x18, 0xc9, 0x72, 0x82, 0x96, 0x13, 0x65, + 0xa6, 0x1e, 0x2c, 0x4a, 0xd2, 0x2a, 0x2a, 0x42, 0x2e, 0x38, 0x10, 0xcf, 0xf2, 0x89, 0xaf, 0xca, + 0xd5, 0xe9, 0x93, 0x10, 0xfa, 0xae, 0x8b, 0xcf, 0xb1, 0xeb, 0xe2, 0x16, 0x2b, 0x86, 0x24, 0xf4, + 0x11, 0x89, 0xf6, 0x73, 0x05, 0x66, 0x43, 0x38, 0x7a, 0x07, 0x66, 0x3d, 0xdc, 0xa6, 0x29, 0x5f, + 0xe1, 0xcf, 0x21, 0xa4, 0x29, 0xd6, 0x19, 0x01, 0x2b, 0x03, 0x43, 0x7a, 0x52, 0x06, 0x72, 0xa8, + 0x89, 0x36, 0xff, 0x6f, 0x05, 0x16, 0x77, 0x71, 0x07, 0x8b, 0x65, 0xc4, 0x3a, 0xe4, 0xd8, 0x35, + 0x17, 0x65, 0xd0, 0x59, 0x0a, 0xa8, 0xb6, 0x84, 0x9b, 0xb7, 0x15, 0xb0, 0x47, 0x37, 0x6f, 0x8a, + 0xbf, 0x79, 0x25, 0xc2, 0x63, 0x37, 0x2f, 0xc5, 0x26, 0xdd, 0xbc, 0x1c, 0x96, 0xbf, 0x8d, 0x06, + 0x19, 0x27, 0xda, 0xf6, 0x0a, 0x2c, 0xf1, 0x8a, 0xd1, 0x1b, 0x40, 0xfb, 0x53, 0x06, 0x6e, 0xf4, + 0x17, 0x39, 0xee, 0x9d, 0x75, 0x2c, 0xef, 0xf1, 0x04, 0x96, 0x59, 0x85, 0x19, 0xdb, 0x69, 0x05, + 0x28, 0xba, 0xe6, 0x34, 0x79, 0xac, 0xb6, 0x50, 0x05, 0xae, 0x8b, 0x45, 0xd4, 0x15, 0xcb, 0xd3, + 0xc9, 0x25, 0x54, 0xfe, 0x52, 0xbc, 0x64, 0x54, 0x98, 0x25, 0xe5, 0x9f, 0x63, 0x77, 0xae, 0x82, + 0x58, 0x9b, 0xd5, 0xa3, 0x67, 0xf4, 0x33, 0x05, 0xd4, 0xd8, 0xb1, 0x74, 0xa9, 0xf2, 0x42, 0x45, + 0xb4, 0x1b, 0x55, 0x44, 0x43, 0x77, 0x39, 0x88, 0xe6, 0xce, 0xa8, 0xd0, 0x4c, 0x40, 0x23, 0x2b, + 0xda, 0x67, 0x2c, 0xb3, 0x4c, 0x07, 0x4b, 0xdf, 0x1f, 0x73, 0x69, 0xfa, 0x24, 0xe6, 0x1d, 0x66, + 0x8b, 0x3e, 0x58, 0x7d, 0x00, 0x9b, 0x43, 0xb5, 0x9c, 0xa8, 0xd4, 0x29, 0xc3, 0xb2, 0x74, 0xdd, + 0x89, 0xbc, 
0xea, 0xcf, 0x0a, 0xdc, 0x4c, 0xdc, 0x1c, 0xab, 0x31, 0x7e, 0x00, 0xd7, 0xc2, 0x93, + 0xb1, 0xec, 0x73, 0x87, 0x45, 0xfb, 0xdb, 0x23, 0x6d, 0xc3, 0x7a, 0x41, 0x06, 0x25, 0xfd, 0x21, + 0xb5, 0xcb, 0x5c, 0xb7, 0x0f, 0x51, 0xdf, 0x87, 0xbc, 0x48, 0x30, 0xd1, 0x06, 0xfe, 0x98, 0x82, + 0xad, 0xbe, 0x06, 0x27, 0x76, 0xf7, 0xf9, 0x05, 0xc0, 0xaf, 0x14, 0xd8, 0x88, 0x79, 0x67, 0xcf, + 0x16, 0xfd, 0x93, 0x5e, 0x3f, 0xfb, 0x83, 0x86, 0x90, 0xab, 0x21, 0x23, 0xe0, 0x7c, 0x34, 0x16, + 0x0b, 0x22, 0x81, 0x7a, 0x18, 0x3f, 0x27, 0x29, 0xfb, 0x44, 0x66, 0xbb, 0x0d, 0xb7, 0x86, 0xa8, + 0xcb, 0x52, 0xcb, 0x4f, 0xd3, 0x70, 0xeb, 0xd4, 0xec, 0x58, 0xad, 0xa8, 0xee, 0x94, 0xb4, 0xdd, + 0xc3, 0x8d, 0x9b, 0xd0, 0x89, 0xa5, 0xbe, 0x42, 0x27, 0xd6, 0x91, 0xc5, 0x29, 0x3d, 0x82, 0xef, + 0x46, 0x82, 0x46, 0x69, 0x3b, 0x6e, 0xa8, 0x26, 0x5d, 0xf2, 0x99, 0x09, 0x2e, 0xf9, 0xe7, 0x12, + 0xa0, 0x1f, 0x83, 0x36, 0x6c, 0x53, 0x2c, 0x44, 0x37, 0x20, 0xe7, 0xf5, 0xba, 0x5d, 0xc7, 0xf5, + 0x31, 0x3d, 0x83, 0x59, 0xbd, 0x0f, 0x40, 0x05, 0x98, 0xb9, 0xc0, 0x9e, 0x67, 0xb6, 0x43, 0xf9, + 0xe1, 0xa3, 0xf6, 0x31, 0xa0, 0x03, 0xcb, 0x63, 0xf5, 0x72, 0x74, 0xa2, 0xa4, 0x3c, 0x36, 0x9f, + 0x1a, 0xd8, 0xf6, 0x5d, 0x8b, 0x15, 0x66, 0x59, 0x1d, 0x2e, 0xcc, 0xa7, 0x15, 0x0a, 0x21, 0xc5, + 0x9b, 0xe7, 0x9b, 0xae, 0x6f, 0xd9, 0x6d, 0xc3, 0x77, 0x9e, 0xe0, 0x68, 0x6c, 0x14, 0x42, 0x1b, + 0x04, 0xa8, 0x7d, 0xae, 0xc0, 0x22, 0x27, 0x9e, 0x69, 0xfb, 0x2e, 0xcc, 0xf4, 0x65, 0x13, 0x7b, + 0xde, 0x0a, 0xed, 0x29, 0xa1, 0x2e, 0xd2, 0x13, 0x0a, 0x39, 0xd0, 0x26, 0x80, 0x8d, 0x9f, 0xfa, + 0xdc, 0xba, 0x39, 0x02, 0x09, 0xd6, 0x54, 0xef, 0x41, 0x96, 0x1a, 0x79, 0xdc, 0xce, 0xe8, 0x8b, + 0x14, 0xa0, 0x3d, 0xec, 0x47, 0x05, 0x2f, 0xb3, 0x41, 0x82, 0xe3, 0x2a, 0x5f, 0xc1, 0x71, 0x3f, + 0xe4, 0x46, 0x08, 0xd4, 0xf5, 0x5f, 0x8d, 0xcd, 0xcf, 0x84, 0xa5, 0x87, 0x4e, 0x10, 0x12, 0xdc, + 0x92, 0x5e, 0xcb, 0x63, 0xd7, 0x9e, 0xcf, 0xd0, 0x61, 0x6b, 0xbb, 0xb0, 0xc8, 0xe9, 0xcc, 0xce, + 0xf4, 0x75, 0x40, 0xe6, 0xa5, 0x69, 0x75, 0x4c, 
0xa2, 0x57, 0x58, 0xc3, 0xb3, 0x9a, 0xfe, 0x7a, + 0x84, 0x09, 0xd9, 0x34, 0x2d, 0x9e, 0xb5, 0x99, 0x3c, 0x71, 0x9e, 0xd7, 0x89, 0xe7, 0xa8, 0x01, + 0x1a, 0xb6, 0xee, 0x9e, 0x74, 0xa6, 0x77, 0x7b, 0x30, 0x27, 0xb3, 0xb9, 0x59, 0xe2, 0x78, 0xef, + 0x6f, 0x29, 0x58, 0x1f, 0x42, 0x8d, 0xde, 0x85, 0xb4, 0xdb, 0x6d, 0x32, 0x67, 0x7a, 0x79, 0x0c, + 0xf9, 0x45, 0xfd, 0xb8, 0xbc, 0x3f, 0xa5, 0x13, 0x2e, 0xf5, 0x4b, 0x05, 0xd2, 0xfa, 0x71, 0x19, + 0x7d, 0x8f, 0x1b, 0xf2, 0xbd, 0x36, 0xa6, 0x94, 0xf8, 0xac, 0x8f, 0x34, 0x93, 0x83, 0xc3, 0xbe, + 0x02, 0x2c, 0x95, 0xf5, 0x4a, 0xa9, 0x51, 0x31, 0x76, 0x2b, 0x07, 0x95, 0x46, 0xc5, 0x38, 0x3d, + 0x3a, 0x38, 0x39, 0xac, 0xe4, 0x15, 0xd2, 0x15, 0x1e, 0x9f, 0xec, 0x1c, 0x54, 0xeb, 0xfb, 0xc6, + 0x49, 0x2d, 0xfc, 0xc7, 0xb0, 0x29, 0x94, 0x87, 0x6b, 0x07, 0xd5, 0x7a, 0x83, 0x01, 0xea, 0xf9, + 0x34, 0x81, 0xec, 0x55, 0x1a, 0x46, 0xb9, 0x74, 0x5c, 0x2a, 0x57, 0x1b, 0x0f, 0xf3, 0x19, 0xd2, + 0x73, 0xf2, 0xb2, 0xeb, 0xb5, 0xd2, 0x71, 0x7d, 0xff, 0xa8, 0x91, 0xcf, 0x22, 0x04, 0x0b, 0x01, + 0x7f, 0x08, 0xaa, 0xe7, 0xa7, 0xa3, 0x91, 0xc5, 0x67, 0x69, 0x58, 0x66, 0x13, 0x18, 0x36, 0xe3, + 0x08, 0x63, 0xeb, 0x2e, 0xe4, 0x69, 0xf3, 0x65, 0x88, 0x17, 0xc7, 0x02, 0x85, 0x9f, 0x86, 0xd7, + 0x47, 0x38, 0x1a, 0x4c, 0xc5, 0x46, 0x83, 0x5d, 0x58, 0x0d, 0x27, 0x67, 0x4c, 0xae, 0x70, 0x21, + 0x0b, 0x23, 0x34, 0x61, 0x75, 0x01, 0xca, 0x5d, 0xc0, 0xcb, 0x4d, 0x19, 0x0e, 0x1d, 0x4a, 0x66, + 0x80, 0xaf, 0x0f, 0x5f, 0x64, 0x48, 0x0c, 0xab, 0xfb, 0xa0, 0x26, 0xeb, 0x30, 0x51, 0x09, 0xf8, + 0x8c, 0xa1, 0xfc, 0x01, 0xac, 0x88, 0xda, 0xb3, 0xa8, 0x7a, 0x6d, 0x60, 0xc4, 0x15, 0xe5, 0x96, + 0x88, 0x36, 0xa2, 0xd0, 0xfe, 0xa0, 0xc0, 0x6c, 0x08, 0x26, 0xf9, 0xd9, 0xb3, 0x7e, 0x82, 0xb9, + 0xa6, 0x3e, 0x47, 0x20, 0xf2, 0x86, 0x5e, 0xe6, 0x0b, 0x69, 0xa9, 0x2f, 0x6c, 0x02, 0xd0, 0xe3, + 0x69, 0x19, 0xa6, 0x1f, 0xb4, 0x12, 0x69, 0x3d, 0xc7, 0x20, 0x25, 0xd2, 0xfc, 0x4e, 0x7b, 0xbe, + 0xe9, 0xf7, 0x48, 0xdb, 0x40, 0x14, 0x5e, 0x11, 0x15, 0xae, 0x07, 0x58, 0x9d, 0x51, 
0x91, 0x40, + 0x5a, 0xe0, 0x51, 0xe8, 0x1e, 0x17, 0x9d, 0xeb, 0x72, 0x01, 0xb1, 0x60, 0x24, 0x17, 0x6b, 0x0b, + 0xfb, 0xa6, 0xd5, 0xf1, 0xc2, 0x8b, 0x95, 0x3d, 0x6a, 0x3b, 0xb2, 0x28, 0xcd, 0x41, 0x56, 0xaf, + 0x94, 0x76, 0x1f, 0xe6, 0x15, 0x34, 0x0f, 0xb9, 0x93, 0xe3, 0x83, 0xa3, 0xd2, 0x6e, 0xb5, 0xb6, + 0x97, 0x4f, 0xa1, 0x45, 0x78, 0xa1, 0xa2, 0xeb, 0x47, 0xba, 0xd1, 0x07, 0xa6, 0x49, 0xa3, 0xbb, + 0xcc, 0x9a, 0x46, 0x21, 0x80, 0x6e, 0xc2, 0x5c, 0xe4, 0xfb, 0x51, 0xec, 0x40, 0x08, 0xaa, 0xb6, + 0x48, 0x8c, 0x84, 0x3d, 0xae, 0x18, 0x23, 0xd2, 0x66, 0x57, 0x74, 0x5f, 0x1e, 0xca, 0xc7, 0x48, + 0x4b, 0x86, 0x23, 0x4e, 0x9d, 0xcc, 0x34, 0x91, 0x57, 0x16, 0x60, 0x45, 0x54, 0x8a, 0xd5, 0xa3, + 0xbf, 0x55, 0x60, 0x89, 0x54, 0x08, 0x21, 0xe2, 0x79, 0x17, 0x2c, 0x13, 0x38, 0xa3, 0x70, 0x02, + 0x19, 0xf1, 0x04, 0xb4, 0xdf, 0x2b, 0xb0, 0x2c, 0xe8, 0xca, 0x62, 0xeb, 0x3d, 0xb1, 0xfa, 0xb9, + 0x1d, 0xaf, 0x7e, 0x06, 0xe8, 0x27, 0xac, 0x7f, 0xde, 0x0c, 0xeb, 0x9f, 0xc9, 0x42, 0xf8, 0x37, + 0x59, 0x58, 0xa9, 0x39, 0x2d, 0x5c, 0xf7, 0xcd, 0xf6, 0x24, 0x73, 0x15, 0x5d, 0xe8, 0x0d, 0xa9, + 0x77, 0xdd, 0x0b, 0x57, 0x92, 0x8b, 0x1c, 0xde, 0x12, 0xa2, 0x22, 0x2c, 0x7a, 0xbe, 0xd9, 0x0e, + 0xce, 0xca, 0x74, 0xdb, 0xd8, 0x37, 0xba, 0xa6, 0xff, 0x98, 0x1d, 0xc4, 0x75, 0x86, 0x6a, 0x04, + 0x98, 0x63, 0xd3, 0x7f, 0x2c, 0x1f, 0x54, 0x64, 0x26, 0x1e, 0x54, 0x9c, 0x01, 0x0a, 0xfa, 0x40, + 0xb2, 0x80, 0xf8, 0x56, 0xe6, 0xdb, 0x23, 0x36, 0x14, 0x81, 0xb9, 0x50, 0xc9, 0xdb, 0x02, 0x18, + 0x99, 0xc9, 0xb3, 0x86, 0x51, 0x4b, 0x8c, 0x3b, 0x63, 0x78, 0xc6, 0x86, 0x9a, 0x74, 0x2d, 0xd2, + 0xdd, 0x7c, 0xfd, 0xb3, 0x89, 0x35, 0x58, 0x1d, 0xb0, 0x05, 0xcb, 0x04, 0x6d, 0x28, 0x10, 0xd4, + 0x89, 0xed, 0x4d, 0xe8, 0xaf, 0x09, 0xbe, 0x95, 0x4a, 0xf0, 0x2d, 0x6d, 0x1d, 0xd6, 0x24, 0x0b, + 0x31, 0x2d, 0xfe, 0x91, 0xa5, 0x6a, 0x4c, 0x3e, 0x74, 0x6b, 0x48, 0xc3, 0xe6, 0x9b, 0x71, 0x17, + 0x90, 0x0e, 0x9a, 0x9e, 0x6f, 0xe0, 0xdc, 0x84, 0xb9, 0x38, 0x1d, 0x4b, 0x62, 0xfe, 0x88, 0xc8, + 0xca, 0x3e, 0xd3, 0x08, 
0x70, 0x5a, 0x18, 0x01, 0xfe, 0x08, 0x96, 0x82, 0xa8, 0x13, 0x67, 0x2b, + 0x33, 0xfc, 0x35, 0x95, 0x68, 0x91, 0x18, 0x82, 0x8b, 0xbd, 0x20, 0x96, 0x85, 0x49, 0x5f, 0x53, + 0x16, 0x7d, 0xb3, 0xc1, 0x42, 0x6f, 0x8d, 0x5c, 0xe8, 0xeb, 0x8a, 0xbf, 0x0a, 0xf5, 0xfa, 0xff, + 0x8b, 0xe9, 0x20, 0xf3, 0x7e, 0xe9, 0x5c, 0x4f, 0x7b, 0x04, 0x2a, 0x0d, 0x8d, 0xc9, 0x47, 0x6e, + 0x82, 0xe3, 0xa5, 0x44, 0xc7, 0xd3, 0x36, 0x61, 0x5d, 0x2a, 0x9b, 0x2d, 0x8d, 0x20, 0x4f, 0xd0, + 0x7b, 0xd8, 0xaf, 0xb6, 0xc2, 0x6e, 0xf1, 0x35, 0xb8, 0x1e, 0x83, 0xb1, 0xbb, 0x36, 0x36, 0xdb, + 0x53, 0xe2, 0xb3, 0x3d, 0x6d, 0x83, 0x2a, 0x9f, 0xd0, 0x79, 0x7e, 0x42, 0x97, 0x4f, 0xea, 0x39, + 0x4b, 0x42, 0xcf, 0x49, 0xaf, 0xf1, 0x4d, 0x2e, 0x81, 0x8f, 0xe8, 0x36, 0xff, 0xaa, 0xb0, 0x34, + 0x3b, 0xd0, 0x67, 0xbe, 0x19, 0xef, 0x33, 0x6f, 0x0d, 0x95, 0x19, 0xef, 0x30, 0xbb, 0xb4, 0xc1, + 0x7c, 0x87, 0x2b, 0x61, 0x5f, 0x1a, 0xc9, 0x1e, 0x6f, 0x2d, 0x5f, 0x4f, 0xe8, 0x2c, 0xeb, 0x8d, + 0xd2, 0x5e, 0xc5, 0x38, 0xa9, 0xd1, 0xdf, 0xb0, 0xb3, 0x8c, 0xfa, 0xbc, 0x25, 0x40, 0xa1, 0xe1, + 0x63, 0xdf, 0x21, 0x7d, 0xae, 0xc0, 0x22, 0x07, 0x1e, 0x71, 0x22, 0xe8, 0x1e, 0x2c, 0x91, 0x1a, + 0x8e, 0xfa, 0x88, 0x67, 0x74, 0xb1, 0x6b, 0x10, 0x0c, 0x7b, 0x8b, 0x78, 0xfd, 0xc2, 0x7c, 0xca, + 0x06, 0x43, 0xc7, 0xd8, 0x25, 0x82, 0x9f, 0xc3, 0x28, 0x64, 0xfb, 0x3f, 0x0a, 0xcc, 0x56, 0x5b, + 0xd8, 0xf6, 0x89, 0xe1, 0x6b, 0x30, 0xcf, 0x7d, 0xcc, 0x84, 0x36, 0x12, 0xbe, 0x71, 0x0a, 0x36, + 0xa8, 0x6e, 0x0e, 0xfd, 0x02, 0x4a, 0x9b, 0x42, 0xe7, 0xb1, 0x0f, 0xb1, 0xb8, 0x79, 0xd0, 0x8b, + 0x03, 0x9c, 0x12, 0x1f, 0x54, 0xef, 0x8c, 0xa0, 0x8a, 0xd6, 0x79, 0x0b, 0xb2, 0xc1, 0x97, 0x39, + 0x68, 0x29, 0xfa, 0x66, 0x28, 0xf6, 0xe1, 0x8e, 0xba, 0x2c, 0x40, 0x43, 0xbe, 0xed, 0xff, 0xce, + 0x00, 0xf4, 0x07, 0x0f, 0xe8, 0x01, 0x5c, 0x8b, 0x7f, 0x61, 0x80, 0xd6, 0x87, 0x7c, 0x88, 0xa2, + 0x6e, 0xc8, 0x91, 0x91, 0x4e, 0x0f, 0xe0, 0x5a, 0xfc, 0x65, 0x55, 0x5f, 0x98, 0xe4, 0xdd, 0x5a, + 0x5f, 0x98, 0xf4, 0xfd, 0xd6, 0x14, 0xea, 0xc0, 0x6a, 0xc2, 
0x3b, 0x06, 0xf4, 0xd2, 0x78, 0x2f, + 0x68, 0xd4, 0x97, 0xc7, 0x7c, 0x59, 0xa1, 0x4d, 0x21, 0x17, 0xd6, 0x12, 0x27, 0xe3, 0xe8, 0xee, + 0xb8, 0xb3, 0x7e, 0xf5, 0x95, 0x31, 0x28, 0xa3, 0x35, 0x7b, 0xa0, 0x26, 0x0f, 0x79, 0xd1, 0x2b, + 0x63, 0x4f, 0xb7, 0xd5, 0x57, 0xc7, 0x21, 0x8d, 0x96, 0xdd, 0x87, 0xb9, 0xd8, 0xc0, 0x15, 0xa9, + 0xd2, 0x29, 0x2c, 0x15, 0xbc, 0x3e, 0x64, 0x42, 0x4b, 0x25, 0xc5, 0x86, 0x82, 0x7d, 0x49, 0x83, + 0xd3, 0xcd, 0xbe, 0x24, 0xc9, 0x14, 0x51, 0x34, 0xbf, 0x90, 0x80, 0x65, 0xe6, 0x97, 0x67, 0x70, + 0x99, 0xf9, 0x13, 0xb2, 0xb9, 0x36, 0x85, 0xbe, 0x0f, 0x0b, 0xfc, 0x1c, 0x04, 0x6d, 0x0e, 0x9d, + 0xee, 0xa8, 0x37, 0x92, 0xd0, 0x71, 0x91, 0x7c, 0x13, 0xdb, 0x17, 0x29, 0xed, 0xb8, 0xfb, 0x22, + 0x13, 0x7a, 0xdf, 0x29, 0x92, 0x9f, 0xb8, 0x06, 0xb1, 0x9f, 0x9f, 0x64, 0x3d, 0x71, 0x3f, 0x3f, + 0x49, 0xbb, 0x4a, 0x6d, 0x6a, 0xfb, 0xcb, 0x0c, 0x64, 0x82, 0x44, 0xda, 0x80, 0x17, 0x84, 0x3a, + 0x1b, 0xdd, 0x18, 0xde, 0x8c, 0xa8, 0x37, 0x13, 0xf1, 0x91, 0xba, 0x8f, 0xe8, 0x7d, 0xcc, 0x55, + 0xce, 0x68, 0x2b, 0xce, 0x27, 0xab, 0xde, 0xd5, 0x5b, 0x43, 0x28, 0x44, 0xd9, 0x7c, 0x2e, 0xd8, + 0x1a, 0x55, 0xc2, 0xf1, 0xb2, 0x93, 0xe2, 0xff, 0x13, 0x7a, 0x6f, 0x89, 0x91, 0xaf, 0xf1, 0x7a, + 0x49, 0x63, 0xfe, 0xf6, 0x50, 0x9a, 0x68, 0x85, 0x0a, 0xe4, 0xa2, 0x4a, 0x05, 0x15, 0xe2, 0x3c, + 0xf1, 0x82, 0x46, 0x5d, 0x93, 0x60, 0x98, 0x8c, 0xf4, 0x2f, 0x52, 0x4a, 0xa8, 0xa8, 0x18, 0x23, + 0x9a, 0xc0, 0x26, 0x8b, 0x8e, 0xdb, 0x43, 0x69, 0xe2, 0x51, 0x1d, 0xbb, 0xc2, 0xfb, 0x51, 0x3d, + 0x78, 0xdd, 0xf7, 0xa3, 0x5a, 0x72, 0xe7, 0x6b, 0x53, 0x3b, 0xd9, 0x47, 0xe9, 0xa6, 0x67, 0x9d, + 0x4d, 0x07, 0x1f, 0x87, 0x7e, 0xeb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9c, 0x3b, 0x5a, 0x51, + 0xf0, 0x2c, 0x00, 0x00, +} diff --git a/pkg/volume/csi/main_test.go b/pkg/volume/csi/main_test.go deleted file mode 100644 index 5322fcd7edc..00000000000 --- a/pkg/volume/csi/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package csi - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/volume/csi/nodeinfomanager/BUILD b/pkg/volume/csi/nodeinfomanager/BUILD index e949893feba..6af576c4221 100644 --- a/pkg/volume/csi/nodeinfomanager/BUILD +++ b/pkg/volume/csi/nodeinfomanager/BUILD @@ -21,7 +21,6 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", - "//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -63,7 +62,6 @@ go_test( "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library", - "//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index 
cd832e169ff..099edaed2a6 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -23,7 +23,8 @@ import ( "fmt" "strings" - csipb "github.com/container-storage-interface/spec/lib/go/csi" + "time" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -40,7 +41,6 @@ import ( nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "time" ) const ( @@ -75,7 +75,7 @@ type Interface interface { // Record in the cluster the given node information from the CSI driver with the given name. // Concurrent calls to InstallCSIDriver() is allowed, but they should not be intertwined with calls // to other methods in this interface. - InstallCSIDriver(driverName string, driverNodeID string, maxVolumeLimit int64, topology *csipb.Topology) error + InstallCSIDriver(driverName string, driverNodeID string, maxVolumeLimit int64, topology map[string]string) error // Remove in the cluster node information from the CSI driver with the given name. // Concurrent calls to UninstallCSIDriver() is allowed, but they should not be intertwined with calls @@ -97,7 +97,7 @@ func NewNodeInfoManager( // CSINodeInfo object. If the CSINodeInfo object doesn't yet exist, it will be created. // If multiple calls to InstallCSIDriver() are made in parallel, some calls might receive Node or // CSINodeInfo update conflicts, which causes the function to retry the corresponding update. 
-func (nim *nodeInfoManager) InstallCSIDriver(driverName string, driverNodeID string, maxAttachLimit int64, topology *csipb.Topology) error { +func (nim *nodeInfoManager) InstallCSIDriver(driverName string, driverNodeID string, maxAttachLimit int64, topology map[string]string) error { if driverNodeID == "" { return fmt.Errorf("error adding CSI driver node info: driverNodeID must not be empty") } @@ -133,12 +133,14 @@ func (nim *nodeInfoManager) InstallCSIDriver(driverName string, driverNodeID str // If multiple calls to UninstallCSIDriver() are made in parallel, some calls might receive Node or // CSINodeInfo update conflicts, which causes the function to retry the corresponding update. func (nim *nodeInfoManager) UninstallCSIDriver(driverName string) error { - err := nim.uninstallDriverFromCSINodeInfo(driverName) - if err != nil { - return fmt.Errorf("error uninstalling CSI driver from CSINodeInfo object %v", err) + if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) { + err := nim.uninstallDriverFromCSINodeInfo(driverName) + if err != nil { + return fmt.Errorf("error uninstalling CSI driver from CSINodeInfo object %v", err) + } } - err = nim.updateNode( + err := nim.updateNode( removeMaxAttachLimit(driverName), removeNodeIDFromNode(driverName), ) @@ -320,13 +322,13 @@ func removeNodeIDFromNode(csiDriverName string) nodeUpdateFunc { // updateTopologyLabels returns a function that updates labels of a Node object with the given // topology information. 
-func updateTopologyLabels(topology *csipb.Topology) nodeUpdateFunc { +func updateTopologyLabels(topology map[string]string) nodeUpdateFunc { return func(node *v1.Node) (*v1.Node, bool, error) { - if topology == nil || len(topology.Segments) == 0 { + if topology == nil || len(topology) == 0 { return node, false, nil } - for k, v := range topology.Segments { + for k, v := range topology { if curVal, exists := node.Labels[k]; exists && curVal != v { return nil, false, fmt.Errorf("detected topology value collision: driver reported %q:%q but existing label is %q:%q", k, v, k, curVal) } @@ -335,7 +337,7 @@ func updateTopologyLabels(topology *csipb.Topology) nodeUpdateFunc { if node.Labels == nil { node.Labels = make(map[string]string) } - for k, v := range topology.Segments { + for k, v := range topology { node.Labels[k] = v } return node, true, nil @@ -345,7 +347,7 @@ func updateTopologyLabels(topology *csipb.Topology) nodeUpdateFunc { func (nim *nodeInfoManager) updateCSINodeInfo( driverName string, driverNodeID string, - topology *csipb.Topology) error { + topology map[string]string) error { csiKubeClient := nim.volumeHost.GetCSIClient() if csiKubeClient == nil { @@ -370,7 +372,7 @@ func (nim *nodeInfoManager) tryUpdateCSINodeInfo( csiKubeClient csiclientset.Interface, driverName string, driverNodeID string, - topology *csipb.Topology) error { + topology map[string]string) error { nodeInfo, err := csiKubeClient.CsiV1alpha1().CSINodeInfos().Get(string(nim.nodeName), metav1.GetOptions{}) if nodeInfo == nil || errors.IsNotFound(err) { @@ -427,7 +429,7 @@ func (nim *nodeInfoManager) installDriverToCSINodeInfo( nodeInfo *csiv1alpha1.CSINodeInfo, driverName string, driverNodeID string, - topology *csipb.Topology) error { + topology map[string]string) error { csiKubeClient := nim.volumeHost.GetCSIClient() if csiKubeClient == nil { @@ -435,10 +437,8 @@ func (nim *nodeInfoManager) installDriverToCSINodeInfo( } topologyKeys := make(sets.String) - if topology != nil { - for k 
:= range topology.Segments { - topologyKeys.Insert(k) - } + for k := range topology { + topologyKeys.Insert(k) } specModified := true diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go index 39ee2fd0cdf..14f0fc67f6d 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go @@ -21,7 +21,6 @@ import ( "fmt" "testing" - "github.com/container-storage-interface/spec/lib/go/csi" "github.com/stretchr/testify/assert" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -49,7 +48,7 @@ type testcase struct { existingNode *v1.Node existingNodeInfo *csiv1alpha1.CSINodeInfo inputNodeID string - inputTopology *csi.Topology + inputTopology map[string]string inputVolumeLimit int64 expectedNodeIDMap map[string]string expectedTopologyMap map[string]sets.String @@ -71,10 +70,8 @@ func TestInstallCSIDriver(t *testing.T) { driverName: "com.example.csi/driver1", existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/), inputNodeID: "com.example.csi/csi-node1", - inputTopology: &csi.Topology{ - Segments: map[string]string{ - "com.example.csi/zone": "zoneA", - }, + inputTopology: map[string]string{ + "com.example.csi/zone": "zoneA", }, expectedNodeIDMap: map[string]string{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -104,10 +101,8 @@ func TestInstallCSIDriver(t *testing.T) { }, ), inputNodeID: "com.example.csi/csi-node1", - inputTopology: &csi.Topology{ - Segments: map[string]string{ - "com.example.csi/zone": "zoneA", - }, + inputTopology: map[string]string{ + "com.example.csi/zone": "zoneA", }, expectedNodeIDMap: map[string]string{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -134,10 +129,8 @@ func TestInstallCSIDriver(t *testing.T) { nil, /* topologyKeys */ ), inputNodeID: "com.example.csi/csi-node1", - inputTopology: &csi.Topology{ - Segments: map[string]string{ - 
"com.example.csi/zone": "zoneA", - }, + inputTopology: map[string]string{ + "com.example.csi/zone": "zoneA", }, expectedNodeIDMap: map[string]string{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -168,10 +161,8 @@ func TestInstallCSIDriver(t *testing.T) { }, ), inputNodeID: "com.example.csi/csi-node1", - inputTopology: &csi.Topology{ - Segments: map[string]string{ - "com.example.csi/zone": "zoneA", - }, + inputTopology: map[string]string{ + "com.example.csi/zone": "zoneA", }, expectedNodeIDMap: map[string]string{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -205,10 +196,8 @@ func TestInstallCSIDriver(t *testing.T) { }, ), inputNodeID: "com.example.csi/csi-node1", - inputTopology: &csi.Topology{ - Segments: map[string]string{ - "com.example.csi/zone": "other-zone", - }, + inputTopology: map[string]string{ + "com.example.csi/zone": "other-zone", }, expectFail: true, }, @@ -231,10 +220,8 @@ func TestInstallCSIDriver(t *testing.T) { }, ), inputNodeID: "com.example.csi/other-node", - inputTopology: &csi.Topology{ - Segments: map[string]string{ - "com.example.csi/rack": "rack1", - }, + inputTopology: map[string]string{ + "com.example.csi/rack": "rack1", }, expectedNodeIDMap: map[string]string{ "com.example.csi/driver1": "com.example.csi/other-node", diff --git a/pkg/volume/downwardapi/downwardapi.go b/pkg/volume/downwardapi/downwardapi.go index 391198ad9dd..9616f335738 100644 --- a/pkg/volume/downwardapi/downwardapi.go +++ b/pkg/volume/downwardapi/downwardapi.go @@ -48,6 +48,10 @@ type downwardAPIPlugin struct { var _ volume.VolumePlugin = &downwardAPIPlugin{} +func getPath(uid types.UID, volName string, host volume.VolumeHost) string { + return host.GetPodVolumeDir(uid, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName), volName) +} + func wrappedVolumeSpec() volume.Spec { return volume.Spec{ Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}}, @@ -91,11 +95,12 @@ 
func (plugin *downwardAPIPlugin) SupportsBulkVolumeVerification() bool { func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { v := &downwardAPIVolume{ - volName: spec.Name(), - items: spec.Volume.DownwardAPI.Items, - pod: pod, - podUID: pod.UID, - plugin: plugin, + volName: spec.Name(), + items: spec.Volume.DownwardAPI.Items, + pod: pod, + podUID: pod.UID, + plugin: plugin, + MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host))), } return &downwardAPIVolumeMounter{ downwardAPIVolume: v, @@ -107,9 +112,10 @@ func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts func (plugin *downwardAPIPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { return &downwardAPIVolumeUnmounter{ &downwardAPIVolume{ - volName: volName, - podUID: podUID, - plugin: plugin, + volName: volName, + podUID: podUID, + plugin: plugin, + MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsDu(getPath(podUID, volName, plugin.host))), }, }, nil } @@ -131,7 +137,7 @@ type downwardAPIVolume struct { pod *v1.Pod podUID types.UID // TODO: remove this redundancy as soon NewUnmounter func will have *v1.POD and not only types.UID plugin *downwardAPIPlugin - volume.MetricsNil + volume.MetricsProvider } // downwardAPIVolumeMounter fetches info from downward API from the pod diff --git a/pkg/volume/fc/fc.go b/pkg/volume/fc/fc.go index cbd247aebca..af257011077 100644 --- a/pkg/volume/fc/fc.go +++ b/pkg/volume/fc/fc.go @@ -355,9 +355,8 @@ type fcDisk struct { } func (fc *fcDisk) GetPath() string { - name := fcPluginName // safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up - return fc.plugin.host.GetPodVolumeDir(fc.podUID, utilstrings.EscapeQualifiedNameForDisk(name), fc.volName) + return fc.plugin.host.GetPodVolumeDir(fc.podUID, utilstrings.EscapeQualifiedNameForDisk(fcPluginName), fc.volName) } 
func (fc *fcDisk) fcGlobalMapPath(spec *volume.Spec) (string, error) { @@ -370,8 +369,7 @@ func (fc *fcDisk) fcGlobalMapPath(spec *volume.Spec) (string, error) { } func (fc *fcDisk) fcPodDeviceMapPath() (string, string) { - name := fcPluginName - return fc.plugin.host.GetPodVolumeDeviceDir(fc.podUID, utilstrings.EscapeQualifiedNameForDisk(name)), fc.volName + return fc.plugin.host.GetPodVolumeDeviceDir(fc.podUID, utilstrings.EscapeQualifiedNameForDisk(fcPluginName)), fc.volName } type fcDiskMounter struct { @@ -462,9 +460,8 @@ func (c *fcDiskUnmapper) TearDownDevice(mapPath, devicePath string) error { if err != nil { return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", mapPath, err) } - klog.V(4).Infof("fc: %q is unmounted, deleting the directory", mapPath) - err = os.RemoveAll(mapPath) - if err != nil { + klog.V(4).Infof("fc: %s is unmounted, deleting the directory", mapPath) + if err = os.RemoveAll(mapPath); err != nil { return fmt.Errorf("fc: failed to delete the directory: %s\nError: %v", mapPath, err) } klog.V(4).Infof("fc: successfully detached disk: %s", mapPath) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 5e1811263d6..408898c114e 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -106,30 +106,7 @@ func (plugin *glusterfsPlugin) GetPluginName() string { } func (plugin *glusterfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) { - var endpointName string - var endpointsNsPtr *string - - volPath, _, err := getVolumeInfo(spec) - if err != nil { - return "", err - } - - if spec.Volume != nil && spec.Volume.Glusterfs != nil { - endpointName = spec.Volume.Glusterfs.EndpointsName - } else if spec.PersistentVolume != nil && - spec.PersistentVolume.Spec.Glusterfs != nil { - endpointName = spec.PersistentVolume.Spec.Glusterfs.EndpointsName - endpointsNsPtr = spec.PersistentVolume.Spec.Glusterfs.EndpointsNamespace - if endpointsNsPtr != nil && *endpointsNsPtr != "" { - 
return fmt.Sprintf("%v:%v:%v", endpointName, *endpointsNsPtr, volPath), nil - } - return "", fmt.Errorf("invalid endpointsnamespace in provided glusterfs PV spec") - - } else { - return "", fmt.Errorf("unable to fetch required parameters from provided glusterfs spec") - } - - return fmt.Sprintf("%v:%v", endpointName, volPath), nil + return "", fmt.Errorf("GetVolumeName() is unimplemented for GlusterFS") } func (plugin *glusterfsPlugin) CanSupport(spec *volume.Spec) bool { diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index f8612d9dc19..54ad75febfe 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -262,17 +262,17 @@ func (plugin *iscsiPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName } type iscsiDisk struct { - VolName string - podUID types.UID - Portals []string - Iqn string - Lun string - Iface string - chap_discovery bool - chap_session bool - secret map[string]string - InitiatorName string - plugin *iscsiPlugin + VolName string + podUID types.UID + Portals []string + Iqn string + Lun string + Iface string + chapDiscovery bool + chapSession bool + secret map[string]string + InitiatorName string + plugin *iscsiPlugin // Utility interface that provides API calls to the provider to attach/detach disks. 
manager diskManager volume.MetricsProvider @@ -539,18 +539,18 @@ func createISCSIDisk(spec *volume.Spec, podUID types.UID, plugin *iscsiPlugin, m } return &iscsiDisk{ - podUID: podUID, - VolName: spec.Name(), - Portals: bkportal, - Iqn: iqn, - Lun: lun, - Iface: iface, - chap_discovery: chapDiscovery, - chap_session: chapSession, - secret: secret, - InitiatorName: initiatorName, - manager: manager, - plugin: plugin}, nil + podUID: podUID, + VolName: spec.Name(), + Portals: bkportal, + Iqn: iqn, + Lun: lun, + Iface: iface, + chapDiscovery: chapDiscovery, + chapSession: chapSession, + secret: secret, + InitiatorName: initiatorName, + manager: manager, + plugin: plugin}, nil } func createSecretMap(spec *volume.Spec, plugin *iscsiPlugin, namespace string) (map[string]string, error) { diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go index ae9c7a70fc8..e5e04c518fd 100644 --- a/pkg/volume/iscsi/iscsi_util.go +++ b/pkg/volume/iscsi/iscsi_util.go @@ -54,12 +54,12 @@ const ( ) var ( - chap_st = []string{ + chapSt = []string{ "discovery.sendtargets.auth.username", "discovery.sendtargets.auth.password", "discovery.sendtargets.auth.username_in", "discovery.sendtargets.auth.password_in"} - chap_sess = []string{ + chapSess = []string{ "node.session.auth.username", "node.session.auth.password", "node.session.auth.username_in", @@ -69,7 +69,7 @@ var ( ) func updateISCSIDiscoverydb(b iscsiDiskMounter, tp string) error { - if !b.chap_discovery { + if !b.chapDiscovery { return nil } out, err := b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "update", "-n", "discovery.sendtargets.auth.authmethod", "-v", "CHAP") @@ -77,7 +77,7 @@ func updateISCSIDiscoverydb(b iscsiDiskMounter, tp string) error { return fmt.Errorf("iscsi: failed to update discoverydb with CHAP, output: %v", string(out)) } - for _, k := range chap_st { + for _, k := range chapSt { v := b.secret[k] if len(v) > 0 { out, err := 
b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "update", "-n", k, "-v", v) @@ -90,7 +90,7 @@ func updateISCSIDiscoverydb(b iscsiDiskMounter, tp string) error { } func updateISCSINode(b iscsiDiskMounter, tp string) error { - if !b.chap_session { + if !b.chapSession { return nil } @@ -99,7 +99,7 @@ func updateISCSINode(b iscsiDiskMounter, tp string) error { return fmt.Errorf("iscsi: failed to update node with CHAP, output: %v", string(out)) } - for _, k := range chap_sess { + for _, k := range chapSess { v := b.secret[k] if len(v) > 0 { out, err := b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "-o", "update", "-n", k, "-v", v) @@ -210,7 +210,7 @@ func (util *ISCSIUtil) persistISCSI(conf iscsiDisk, mnt string) error { defer fp.Close() encoder := json.NewEncoder(fp) if err = encoder.Encode(conf); err != nil { - return fmt.Errorf("iscsi: encode err: %v.", err) + return fmt.Errorf("iscsi: encode err: %v", err) } return nil } @@ -224,7 +224,7 @@ func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error { defer fp.Close() decoder := json.NewDecoder(fp) if err = decoder.Decode(conf); err != nil { - return fmt.Errorf("iscsi: decode err: %v.", err) + return fmt.Errorf("iscsi: decode err: %v", err) } return nil } diff --git a/pkg/volume/nfs/nfs_test.go b/pkg/volume/nfs/nfs_test.go index 924978454b3..2c6b7667c76 100644 --- a/pkg/volume/nfs/nfs_test.go +++ b/pkg/volume/nfs/nfs_test.go @@ -89,8 +89,8 @@ func TestRecycler(t *testing.T) { plugMgr.InitPlugins([]volume.VolumePlugin{&nfsPlugin{nil, volume.VolumeConfig{}}}, nil, volumetest.NewFakeVolumeHost(tmpDir, nil, nil)) spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{NFS: &v1.NFSVolumeSource{Path: "/foo"}}}}} - _, plugin_err := plugMgr.FindRecyclablePluginBySpec(spec) - if plugin_err != nil { + _, pluginErr := plugMgr.FindRecyclablePluginBySpec(spec) + 
if pluginErr != nil { t.Errorf("Can't find the plugin by name") } } diff --git a/pkg/volume/portworx/portworx.go b/pkg/volume/portworx/portworx.go index 212a4c23d79..e2cb8982986 100644 --- a/pkg/volume/portworx/portworx.go +++ b/pkg/volume/portworx/portworx.go @@ -20,6 +20,7 @@ import ( "fmt" "os" + volumeclient "github.com/libopenstorage/openstorage/api/client/volume" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,7 +44,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin { type portworxVolumePlugin struct { host volume.VolumeHost - util *PortworxVolumeUtil + util *portworxVolumeUtil } var _ volume.VolumePlugin = &portworxVolumePlugin{} @@ -61,8 +62,18 @@ func getPath(uid types.UID, volName string, host volume.VolumeHost) string { } func (plugin *portworxVolumePlugin) Init(host volume.VolumeHost) error { + client, err := volumeclient.NewDriverClient( + fmt.Sprintf("http://%s:%d", host.GetHostName(), osdMgmtDefaultPort), + pxdDriverName, osdDriverVersion, pxDriverName) + if err != nil { + return err + } + plugin.host = host - plugin.util = &PortworxVolumeUtil{} + plugin.util = &portworxVolumeUtil{ + portworxClient: client, + } + return nil } diff --git a/pkg/volume/portworx/portworx_util.go b/pkg/volume/portworx/portworx_util.go index 62b3e3f4d31..7ac5522dac1 100644 --- a/pkg/volume/portworx/portworx_util.go +++ b/pkg/volume/portworx/portworx_util.go @@ -34,22 +34,22 @@ import ( ) const ( - osdMgmtPort = "9001" - osdDriverVersion = "v1" - pxdDriverName = "pxd" - pvcClaimLabel = "pvc" - pvcNamespaceLabel = "namespace" - pxServiceName = "portworx-service" - pxDriverName = "pxd-sched" + osdMgmtDefaultPort = 9001 + osdDriverVersion = "v1" + pxdDriverName = "pxd" + pvcClaimLabel = "pvc" + pvcNamespaceLabel = "namespace" + pxServiceName = "portworx-service" + pxDriverName = "pxd-sched" ) -type PortworxVolumeUtil struct { +type portworxVolumeUtil struct { portworxClient *osdclient.Client } // CreateVolume 
creates a Portworx volume. -func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int64, map[string]string, error) { - driver, err := util.getPortworxDriver(p.plugin.host, false /*localOnly*/) +func (util *portworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int64, map[string]string, error) { + driver, err := util.getPortworxDriver(p.plugin.host) if err != nil || driver == nil { klog.Errorf("Failed to get portworx driver. Err: %v", err) return "", 0, nil, err @@ -112,8 +112,8 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri } // DeleteVolume deletes a Portworx volume -func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { - driver, err := util.getPortworxDriver(d.plugin.host, false /*localOnly*/) +func (util *portworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { + driver, err := util.getPortworxDriver(d.plugin.host) if err != nil || driver == nil { klog.Errorf("Failed to get portworx driver. Err: %v", err) return err @@ -128,8 +128,8 @@ func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { } // AttachVolume attaches a Portworx Volume -func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOptions map[string]string) (string, error) { - driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/) +func (util *portworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOptions map[string]string) (string, error) { + driver, err := util.getLocalPortworxDriver(m.plugin.host) if err != nil || driver == nil { klog.Errorf("Failed to get portworx driver. 
Err: %v", err) return "", err @@ -144,8 +144,8 @@ func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOpt } // DetachVolume detaches a Portworx Volume -func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { - driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/) +func (util *portworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { + driver, err := util.getLocalPortworxDriver(u.plugin.host) if err != nil || driver == nil { klog.Errorf("Failed to get portworx driver. Err: %v", err) return err @@ -160,8 +160,8 @@ func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { } // MountVolume mounts a Portworx Volume on the specified mountPath -func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error { - driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/) +func (util *portworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error { + driver, err := util.getLocalPortworxDriver(m.plugin.host) if err != nil || driver == nil { klog.Errorf("Failed to get portworx driver. Err: %v", err) return err @@ -176,8 +176,8 @@ func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath } // UnmountVolume unmounts a Portworx Volume -func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error { - driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/) +func (util *portworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error { + driver, err := util.getLocalPortworxDriver(u.plugin.host) if err != nil || driver == nil { klog.Errorf("Failed to get portworx driver. 
Err: %v", err) return err @@ -191,8 +191,8 @@ func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountP return nil } -func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error { - driver, err := util.getPortworxDriver(volumeHost, false /*localOnly*/) +func (util *portworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error { + driver, err := util.getPortworxDriver(volumeHost) if err != nil || driver == nil { klog.Errorf("Failed to get portworx driver. Err: %v", err) return err @@ -254,8 +254,8 @@ func isClientValid(client *osdclient.Client) (bool, error) { return true, nil } -func createDriverClient(hostname string) (*osdclient.Client, error) { - client, err := volumeclient.NewDriverClient("http://"+hostname+":"+osdMgmtPort, +func createDriverClient(hostname string, port int32) (*osdclient.Client, error) { + client, err := volumeclient.NewDriverClient(fmt.Sprintf("http://%s:%d", hostname, port), pxdDriverName, osdDriverVersion, pxDriverName) if err != nil { return nil, err @@ -268,65 +268,105 @@ func createDriverClient(hostname string) (*osdclient.Client, error) { } } -// getPortworxDriver() returns a Portworx volume driver which can be used for volume operations -// localOnly: If true, the returned driver will be connected to Portworx API server on volume host. -// If false, driver will be connected to API server on volume host or Portworx k8s service cluster IP -// This flag is required to explicitly force certain operations (mount, unmount, detach, attach) to -// go to the volume host instead of the k8s service which might route it to any host. This pertains to how -// Portworx mounts and attaches a volume to the running container. 
The node getting these requests needs to -// see the pod container mounts (specifically /var/lib/kubelet/pods/) -// Operations like create and delete volume don't need to be restricted to local volume host since -// any node in the Portworx cluster can co-ordinate the create/delete request and forward the operations to -// the Portworx node that will own/owns the data. -func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost, localOnly bool) (volumeapi.VolumeDriver, error) { - var err error - if localOnly { - util.portworxClient, err = createDriverClient(volumeHost.GetHostName()) - if err != nil { - return nil, err - } else { - klog.V(4).Infof("Using portworx local service at: %v as api endpoint", volumeHost.GetHostName()) - return volumeclient.VolumeDriver(util.portworxClient), nil - } - } - +// getPortworxDriver returns a Portworx volume driver which can be used for cluster wide operations. +// Operations like create and delete volume don't need to be restricted to local volume host since +// any node in the Portworx cluster can co-ordinate the create/delete request and forward the operations to +// the Portworx node that will own/owns the data. 
+func (util *portworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) { // check if existing saved client is valid if isValid, _ := isClientValid(util.portworxClient); isValid { return volumeclient.VolumeDriver(util.portworxClient), nil } // create new client - util.portworxClient, err = createDriverClient(volumeHost.GetHostName()) // for backward compatibility + var err error + util.portworxClient, err = createDriverClient(volumeHost.GetHostName(), osdMgmtDefaultPort) // for backward compatibility if err != nil || util.portworxClient == nil { - // Create client from portworx service - kubeClient := volumeHost.GetKubeClient() - if kubeClient == nil { - klog.Error("Failed to get kubeclient when creating portworx client") - return nil, nil - } - - opts := metav1.GetOptions{} - svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(pxServiceName, opts) + // Create client from portworx k8s service. + svc, err := getPortworxService(volumeHost) if err != nil { - klog.Errorf("Failed to get service. Err: %v", err) return nil, err } - if svc == nil { - klog.Errorf("Service: %v not found. Consult Portworx docs to deploy it.", pxServiceName) - return nil, err - } - - util.portworxClient, err = createDriverClient(svc.Spec.ClusterIP) + // The port here is always the default one since it's the service port + util.portworxClient, err = createDriverClient(svc.Spec.ClusterIP, osdMgmtDefaultPort) if err != nil || util.portworxClient == nil { klog.Errorf("Failed to connect to portworx service. 
Err: %v", err) return nil, err } - klog.Infof("Using portworx cluster service at: %v as api endpoint", svc.Spec.ClusterIP) + klog.Infof("Using portworx cluster service at: %v:%d as api endpoint", + svc.Spec.ClusterIP, osdMgmtDefaultPort) } else { - klog.Infof("Using portworx service at: %v as api endpoint", volumeHost.GetHostName()) + klog.Infof("Using portworx service at: %v:%d as api endpoint", + volumeHost.GetHostName(), osdMgmtDefaultPort) } return volumeclient.VolumeDriver(util.portworxClient), nil } + +// getLocalPortworxDriver returns driver connected to Portworx API server on volume host. +// This is required to force certain operations (mount, unmount, detach, attach) to +// go to the volume host instead of the k8s service which might route it to any host. This pertains to how +// Portworx mounts and attaches a volume to the running container. The node getting these requests needs to +// see the pod container mounts (specifically /var/lib/kubelet/pods/) +func (util *portworxVolumeUtil) getLocalPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) { + if util.portworxClient != nil { + // check if existing saved client is valid + if isValid, _ := isClientValid(util.portworxClient); isValid { + return volumeclient.VolumeDriver(util.portworxClient), nil + } + } + + // Lookup port + svc, err := getPortworxService(volumeHost) + if err != nil { + return nil, err + } + + osgMgmtPort := lookupPXAPIPortFromService(svc) + util.portworxClient, err = createDriverClient(volumeHost.GetHostName(), osgMgmtPort) + if err != nil { + return nil, err + } + + klog.Infof("Using portworx local service at: %v:%d as api endpoint", + volumeHost.GetHostName(), osgMgmtPort) + return volumeclient.VolumeDriver(util.portworxClient), nil +} + +// lookupPXAPIPortFromService goes over all the ports in the given service and returns the target +// port for osdMgmtDefaultPort +func lookupPXAPIPortFromService(svc *v1.Service) int32 { + for _, p := range svc.Spec.Ports { + 
if p.Port == osdMgmtDefaultPort { + return p.TargetPort.IntVal + } + } + return osdMgmtDefaultPort // default +} + +// getPortworxService returns the portworx cluster service from the API server +func getPortworxService(host volume.VolumeHost) (*v1.Service, error) { + kubeClient := host.GetKubeClient() + if kubeClient == nil { + err := fmt.Errorf("Failed to get kubeclient when creating portworx client") + klog.Errorf(err.Error()) + return nil, err + } + + opts := metav1.GetOptions{} + svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(pxServiceName, opts) + if err != nil { + klog.Errorf("Failed to get service. Err: %v", err) + return nil, err + } + + if svc == nil { + err = fmt.Errorf("Service: %v not found. Consult Portworx docs to deploy it.", pxServiceName) + klog.Errorf(err.Error()) + return nil, err + } + + return svc, nil +} diff --git a/pkg/volume/projected/projected.go b/pkg/volume/projected/projected.go index e93588ce676..06ea997292d 100644 --- a/pkg/volume/projected/projected.go +++ b/pkg/volume/projected/projected.go @@ -111,10 +111,11 @@ func (plugin *projectedPlugin) SupportsBulkVolumeVerification() bool { func (plugin *projectedPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { return &projectedVolumeMounter{ projectedVolume: &projectedVolume{ - volName: spec.Name(), - sources: spec.Volume.Projected.Sources, - podUID: pod.UID, - plugin: plugin, + volName: spec.Name(), + sources: spec.Volume.Projected.Sources, + podUID: pod.UID, + plugin: plugin, + MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host))), }, source: *spec.Volume.Projected, pod: pod, @@ -125,9 +126,10 @@ func (plugin *projectedPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts v func (plugin *projectedPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { return &projectedVolumeUnmounter{ &projectedVolume{ - volName: volName, - podUID: 
podUID, - plugin: plugin, + volName: volName, + podUID: podUID, + plugin: plugin, + MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsDu(getPath(podUID, volName, plugin.host))), }, }, nil } @@ -148,7 +150,7 @@ type projectedVolume struct { sources []v1.VolumeProjection podUID types.UID plugin *projectedPlugin - volume.MetricsNil + volume.MetricsProvider } var _ volume.Volume = &projectedVolume{} diff --git a/pkg/volume/rbd/attacher.go b/pkg/volume/rbd/attacher.go index 97d944f2b5c..626dc321489 100644 --- a/pkg/volume/rbd/attacher.go +++ b/pkg/volume/rbd/attacher.go @@ -103,7 +103,7 @@ func (attacher *rbdAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName t return volumesAttachedCheck, nil } -// WaitForAttach implements Attacher.WaitForAttach. It's called by kublet to +// WaitForAttach implements Attacher.WaitForAttach. It's called by kubelet to // attach volume onto the node. // This method is idempotent, callers are responsible for retrying on failure. func (attacher *rbdAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) { diff --git a/pkg/volume/util/BUILD b/pkg/volume/util/BUILD index 64eac778de2..ffd1d032151 100644 --- a/pkg/volume/util/BUILD +++ b/pkg/volume/util/BUILD @@ -52,7 +52,6 @@ go_test( "atomic_writer_test.go", "attach_limit_test.go", "device_util_linux_test.go", - "main_test.go", "nested_volumes_test.go", "resize_util_test.go", "util_test.go", diff --git a/pkg/volume/util/main_test.go b/pkg/volume/util/main_test.go deleted file mode 100644 index 6af02d0a11d..00000000000 --- a/pkg/volume/util/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index 745910dc220..cfcc249f8bd 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -725,7 +725,7 @@ func (oe *operationExecutor) MountVolume( if fsVolume { // Filesystem volume case // Mount/remount a volume when a volume is attached - generatedOperations, err = oe.operationGenerator.GenerateMountVolumeFunc( + generatedOperations = oe.operationGenerator.GenerateMountVolumeFunc( waitForAttachTimeout, volumeToMount, actualStateOfWorld, isRemount) } else { diff --git a/pkg/volume/util/operationexecutor/operation_executor_test.go b/pkg/volume/util/operationexecutor/operation_executor_test.go index c7d0c9c8097..00816341cd6 100644 --- a/pkg/volume/util/operationexecutor/operation_executor_test.go +++ b/pkg/volume/util/operationexecutor/operation_executor_test.go @@ -389,14 +389,14 @@ func newFakeOperationGenerator(ch chan interface{}, quit chan interface{}) Opera } } -func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater 
ActualStateOfWorldMounterUpdater, isRemount bool) (volumetypes.GeneratedOperations, error) { +func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) volumetypes.GeneratedOperations { opFunc := func() (error, error) { startOperationAndBlock(fopg.ch, fopg.quit) return nil, nil } return volumetypes.GeneratedOperations{ OperationFunc: opFunc, - }, nil + } } func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error) { opFunc := func() (error, error) { diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 36c10b67752..a185a5f33f8 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -40,6 +40,10 @@ import ( "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) +const ( + unknownVolumePlugin string = "UnknownVolumePlugin" +) + var _ OperationGenerator = &operationGenerator{} type operationGenerator struct { @@ -82,7 +86,7 @@ func NewOperationGenerator(kubeClient clientset.Interface, // OperationGenerator interface that extracts out the functions from operation_executor to make it dependency injectable type OperationGenerator interface { // Generates the MountVolume function needed to perform the mount of a volume plugin - GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (volumetypes.GeneratedOperations, error) + GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) 
volumetypes.GeneratedOperations // Generates the UnmountVolume function needed to perform the unmount of a volume plugin GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error) @@ -387,8 +391,8 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err) } attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginByName(pluginName) - if err != nil { - return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) + if err != nil || attachableVolumePlugin == nil { + return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginByName failed", err) } } @@ -436,61 +440,61 @@ func (og *operationGenerator) GenerateMountVolumeFunc( waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, - isRemount bool) (volumetypes.GeneratedOperations, error) { + isRemount bool) volumetypes.GeneratedOperations { // Get mounter plugin + volumePluginName := unknownVolumePlugin volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) - if err != nil || volumePlugin == nil { - return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("MountVolume.FindPluginBySpec failed", err) - } - - affinityErr := checkNodeAffinity(og, volumeToMount, volumePlugin) - if affinityErr != nil { - eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.NodeAffinity check failed", affinityErr) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) - return volumetypes.GeneratedOperations{}, detailedErr - } - - volumeMounter, newMounterErr := volumePlugin.NewMounter( - 
volumeToMount.VolumeSpec, - volumeToMount.Pod, - volume.VolumeOptions{}) - if newMounterErr != nil { - eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.NewMounter initialization failed", newMounterErr) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) - return volumetypes.GeneratedOperations{}, detailedErr - } - - mountCheckError := checkMountOptionSupport(og, volumeToMount, volumePlugin) - - if mountCheckError != nil { - eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.MountOptionSupport check failed", mountCheckError) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.UnsupportedMountOption, eventErr.Error()) - return volumetypes.GeneratedOperations{}, detailedErr - } - - // Get attacher, if possible - attachableVolumePlugin, _ := - og.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec) - var volumeAttacher volume.Attacher - if attachableVolumePlugin != nil { - volumeAttacher, _ = attachableVolumePlugin.NewAttacher() - } - - // get deviceMounter, if possible - deviceMountableVolumePlugin, _ := og.volumePluginMgr.FindDeviceMountablePluginBySpec(volumeToMount.VolumeSpec) - var volumeDeviceMounter volume.DeviceMounter - if deviceMountableVolumePlugin != nil { - volumeDeviceMounter, _ = deviceMountableVolumePlugin.NewDeviceMounter() - } - - var fsGroup *int64 - if volumeToMount.Pod.Spec.SecurityContext != nil && - volumeToMount.Pod.Spec.SecurityContext.FSGroup != nil { - fsGroup = volumeToMount.Pod.Spec.SecurityContext.FSGroup + if err == nil && volumePlugin != nil { + volumePluginName = volumePlugin.GetPluginName() } mountVolumeFunc := func() (error, error) { + if err != nil || volumePlugin == nil { + return volumeToMount.GenerateError("MountVolume.FindPluginBySpec failed", err) + } + + affinityErr := checkNodeAffinity(og, volumeToMount, volumePlugin) + if affinityErr != nil { + return volumeToMount.GenerateError("MountVolume.NodeAffinity 
check failed", affinityErr) + } + + volumeMounter, newMounterErr := volumePlugin.NewMounter( + volumeToMount.VolumeSpec, + volumeToMount.Pod, + volume.VolumeOptions{}) + if newMounterErr != nil { + return volumeToMount.GenerateError("MountVolume.NewMounter initialization failed", newMounterErr) + + } + + mountCheckError := checkMountOptionSupport(og, volumeToMount, volumePlugin) + + if mountCheckError != nil { + return volumeToMount.GenerateError("MountVolume.MountOptionSupport check failed", mountCheckError) + } + + // Get attacher, if possible + attachableVolumePlugin, _ := + og.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec) + var volumeAttacher volume.Attacher + if attachableVolumePlugin != nil { + volumeAttacher, _ = attachableVolumePlugin.NewAttacher() + } + + // get deviceMounter, if possible + deviceMountableVolumePlugin, _ := og.volumePluginMgr.FindDeviceMountablePluginBySpec(volumeToMount.VolumeSpec) + var volumeDeviceMounter volume.DeviceMounter + if deviceMountableVolumePlugin != nil { + volumeDeviceMounter, _ = deviceMountableVolumePlugin.NewDeviceMounter() + } + + var fsGroup *int64 + if volumeToMount.Pod.Spec.SecurityContext != nil && + volumeToMount.Pod.Spec.SecurityContext.FSGroup != nil { + fsGroup = volumeToMount.Pod.Spec.SecurityContext.FSGroup + } + devicePath := volumeToMount.DevicePath if volumeAttacher != nil { // Wait for attachable volumes to finish attaching @@ -536,7 +540,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( // resizeFileSystem will resize the file system if user has requested a resize of // underlying persistent volume and is allowed to do so. 
- resizeSimpleError, resizeDetailedError := og.resizeFileSystem(volumeToMount, devicePath, deviceMountPath, volumePlugin.GetPluginName()) + resizeSimpleError, resizeDetailedError := og.resizeFileSystem(volumeToMount, devicePath, deviceMountPath, volumePluginName) if resizeSimpleError != nil || resizeDetailedError != nil { return resizeSimpleError, resizeDetailedError @@ -593,8 +597,8 @@ func (og *operationGenerator) GenerateMountVolumeFunc( return volumetypes.GeneratedOperations{ OperationFunc: mountVolumeFunc, EventRecorderFunc: eventRecorderFunc, - CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "volume_mount"), - }, nil + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePluginName, volumeToMount.VolumeSpec), "volume_mount"), + } } func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devicePath, deviceMountPath, pluginName string) (simpleErr, detailedErr error) { diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index 070961c2822..8f5f4f6ff65 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -201,9 +201,9 @@ func PathExists(path string) (bool, error) { return false, nil } else if IsCorruptedMnt(err) { return true, err - } else { - return false, err } + + return false, err } // IsCorruptedMnt return true if err is about corrupted mount point diff --git a/plugin/pkg/admission/noderestriction/admission.go b/plugin/pkg/admission/noderestriction/admission.go index 222156a100a..e44f0263924 100644 --- a/plugin/pkg/admission/noderestriction/admission.go +++ b/plugin/pkg/admission/noderestriction/admission.go @@ -124,7 +124,7 @@ func (c *nodePlugin) Admit(a admission.Attributes) error { case "eviction": return c.admitPodEviction(nodeName, a) default: - return admission.NewForbidden(a, fmt.Errorf("unexpected pod subresource %q", a.GetSubresource())) + return admission.NewForbidden(a, 
fmt.Errorf("unexpected pod subresource %q, only 'status' and 'eviction' are allowed", a.GetSubresource())) } case nodeResource: @@ -218,7 +218,7 @@ func (c *nodePlugin) admitPod(nodeName string, a admission.Attributes) error { return nil default: - return admission.NewForbidden(a, fmt.Errorf("unexpected operation %q", a.GetOperation())) + return admission.NewForbidden(a, fmt.Errorf("unexpected operation %q, node %q can only create and delete mirror pods", a.GetOperation(), nodeName)) } } @@ -280,7 +280,7 @@ func (c *nodePlugin) admitPVCStatus(nodeName string, a admission.Attributes) err switch a.GetOperation() { case admission.Update: if !c.features.Enabled(features.ExpandPersistentVolumes) { - return admission.NewForbidden(a, fmt.Errorf("node %q may not update persistentvolumeclaim metadata", nodeName)) + return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to update persistentvolumeclaim metadata", nodeName)) } oldPVC, ok := a.GetOldObject().(*api.PersistentVolumeClaim) @@ -310,7 +310,7 @@ func (c *nodePlugin) admitPVCStatus(nodeName string, a admission.Attributes) err // ensure no metadata changed. nodes should not be able to relabel, add finalizers/owners, etc if !apiequality.Semantic.DeepEqual(oldPVC, newPVC) { - return admission.NewForbidden(a, fmt.Errorf("node %q may not update fields other than status.capacity and status.conditions: %v", nodeName, diff.ObjectReflectDiff(oldPVC, newPVC))) + return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to update fields other than status.capacity and status.conditions: %v", nodeName, diff.ObjectReflectDiff(oldPVC, newPVC))) } return nil @@ -331,14 +331,14 @@ func (c *nodePlugin) admitNode(nodeName string, a admission.Attributes) error { // Don't allow a node to create its Node API object with the config source set. // We scope node access to things listed in the Node.Spec, so allowing this would allow a view escalation. 
if node.Spec.ConfigSource != nil { - return admission.NewForbidden(a, fmt.Errorf("cannot create with non-nil configSource")) + return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to create pods with a non-nil configSource", nodeName)) } // Don't allow a node to register with labels outside the allowed set. // This would allow a node to add or modify its labels in a way that would let it steer privileged workloads to itself. modifiedLabels := getModifiedLabels(node.Labels, nil) if forbiddenLabels := c.getForbiddenCreateLabels(modifiedLabels); len(forbiddenLabels) > 0 { - return admission.NewForbidden(a, fmt.Errorf("cannot set labels: %s", strings.Join(forbiddenLabels.List(), ", "))) + return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to set the following labels: %s", nodeName, strings.Join(forbiddenLabels.List(), ", "))) } // check and warn if nodes set labels on create that would have been forbidden on update // TODO(liggitt): in 1.17, expand getForbiddenCreateLabels to match getForbiddenUpdateLabels and drop this @@ -352,7 +352,7 @@ func (c *nodePlugin) admitNode(nodeName string, a admission.Attributes) error { } } if requestedName != nodeName { - return admission.NewForbidden(a, fmt.Errorf("node %q cannot modify node %q", nodeName, requestedName)) + return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to modify node %q", nodeName, requestedName)) } if a.GetOperation() == admission.Update { @@ -369,20 +369,20 @@ func (c *nodePlugin) admitNode(nodeName string, a admission.Attributes) error { // We scope node access to things listed in the Node.Spec, so allowing this would allow a view escalation. // We only do the check if the new node's configSource is non-nil; old kubelets might drop the field during a status update. 
if node.Spec.ConfigSource != nil && !apiequality.Semantic.DeepEqual(node.Spec.ConfigSource, oldNode.Spec.ConfigSource) { - return admission.NewForbidden(a, fmt.Errorf("node %q cannot update configSource to a new non-nil configSource", nodeName)) + return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to update configSource to a new non-nil configSource", nodeName)) } // Don't allow a node to update its own taints. This would allow a node to remove or modify its // taints in a way that would let it steer disallowed workloads to itself. if !apiequality.Semantic.DeepEqual(node.Spec.Taints, oldNode.Spec.Taints) { - return admission.NewForbidden(a, fmt.Errorf("node %q cannot modify taints", nodeName)) + return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to modify taints", nodeName)) } // Don't allow a node to update labels outside the allowed set. // This would allow a node to add or modify its labels in a way that would let it steer privileged workloads to itself. 
modifiedLabels := getModifiedLabels(node.Labels, oldNode.Labels) if forbiddenUpdateLabels := c.getForbiddenUpdateLabels(modifiedLabels); len(forbiddenUpdateLabels) > 0 { - return admission.NewForbidden(a, fmt.Errorf("cannot modify labels: %s", strings.Join(forbiddenUpdateLabels.List(), ", "))) + return admission.NewForbidden(a, fmt.Errorf("is not allowed to modify labels: %s", strings.Join(forbiddenUpdateLabels.List(), ", "))) } } diff --git a/plugin/pkg/admission/noderestriction/admission_test.go b/plugin/pkg/admission/noderestriction/admission_test.go index b271109e2a1..1ebadaf9f94 100644 --- a/plugin/pkg/admission/noderestriction/admission_test.go +++ b/plugin/pkg/admission/noderestriction/admission_test.go @@ -868,7 +868,7 @@ func Test_nodePlugin_Admit(t *testing.T) { name: "forbid create of my node with forbidden labels", podsGetter: noExistingPods, attributes: admission.NewAttributesRecord(setForbiddenCreateLabels(mynodeObj, ""), nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, false, mynode), - err: `cannot set labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo`, + err: `is not allowed to set the following labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo`, }, { name: "allow update of my node", @@ -892,7 +892,7 @@ func Test_nodePlugin_Admit(t *testing.T) { name: "forbid create of my node with non-nil configSource", podsGetter: noExistingPods, attributes: admission.NewAttributesRecord(mynodeObjConfigA, nil, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Create, false, mynode), - err: "create with non-nil configSource", + err: "is not allowed to create pods with a non-nil configSource", }, { name: "forbid update of my node: nil configSource to new non-nil configSource", @@ -964,37 +964,37 @@ func Test_nodePlugin_Admit(t *testing.T) { name: "forbid update of my node: add taints", podsGetter: existingPods, attributes: 
admission.NewAttributesRecord(mynodeObjTaintA, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), - err: "cannot modify taints", + err: "is not allowed to modify taints", }, { name: "forbid update of my node: remove taints", podsGetter: existingPods, attributes: admission.NewAttributesRecord(mynodeObj, mynodeObjTaintA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), - err: "cannot modify taints", + err: "is not allowed to modify taints", }, { name: "forbid update of my node: change taints", podsGetter: existingPods, attributes: admission.NewAttributesRecord(mynodeObjTaintA, mynodeObjTaintB, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), - err: "cannot modify taints", + err: "is not allowed to modify taints", }, { name: "forbid update of my node: add labels", podsGetter: existingPods, attributes: admission.NewAttributesRecord(setForbiddenUpdateLabels(mynodeObj, ""), mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), - err: `cannot modify labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo, other.k8s.io/foo, other.kubernetes.io/foo`, + err: `is not allowed to modify labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo, other.k8s.io/foo, other.kubernetes.io/foo`, }, { name: "forbid update of my node: remove labels", podsGetter: existingPods, attributes: admission.NewAttributesRecord(mynodeObj, setForbiddenUpdateLabels(mynodeObj, ""), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), - err: `cannot modify labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo, other.k8s.io/foo, other.kubernetes.io/foo`, + err: `is not allowed to modify labels: foo.node-restriction.kubernetes.io/foo, 
node-restriction.kubernetes.io/foo, other.k8s.io/foo, other.kubernetes.io/foo`, }, { name: "forbid update of my node: change labels", podsGetter: existingPods, attributes: admission.NewAttributesRecord(setForbiddenUpdateLabels(mynodeObj, "new"), setForbiddenUpdateLabels(mynodeObj, "old"), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), - err: `cannot modify labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo, other.k8s.io/foo, other.kubernetes.io/foo`, + err: `is not allowed to modify labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo, other.k8s.io/foo, other.kubernetes.io/foo`, }, // Other node object @@ -1002,31 +1002,31 @@ func Test_nodePlugin_Admit(t *testing.T) { name: "forbid create of other node", podsGetter: noExistingPods, attributes: admission.NewAttributesRecord(othernodeObj, nil, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "", admission.Create, false, mynode), - err: "cannot modify node", + err: "is not allowed to modify node", }, { name: "forbid create of other node pulling name from object", podsGetter: noExistingPods, attributes: admission.NewAttributesRecord(othernodeObj, nil, nodeKind, othernodeObj.Namespace, "", nodeResource, "", admission.Create, false, mynode), - err: "cannot modify node", + err: "is not allowed to modify node", }, { name: "forbid update of other node", podsGetter: existingPods, attributes: admission.NewAttributesRecord(othernodeObj, othernodeObj, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "", admission.Update, false, mynode), - err: "cannot modify node", + err: "is not allowed to modify node", }, { name: "forbid delete of other node", podsGetter: existingPods, attributes: admission.NewAttributesRecord(nil, nil, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "", admission.Delete, false, mynode), - err: "cannot modify node", + err: "is not allowed to 
modify node", }, { name: "forbid update of other node status", podsGetter: existingPods, attributes: admission.NewAttributesRecord(othernodeObj, othernodeObj, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "status", admission.Update, false, mynode), - err: "cannot modify node", + err: "is not allowed to modify node", }, // Service accounts diff --git a/plugin/pkg/admission/podtolerationrestriction/BUILD b/plugin/pkg/admission/podtolerationrestriction/BUILD index b519bf07ee7..20b854020c7 100644 --- a/plugin/pkg/admission/podtolerationrestriction/BUILD +++ b/plugin/pkg/admission/podtolerationrestriction/BUILD @@ -8,10 +8,7 @@ load( go_test( name = "go_default_test", - srcs = [ - "admission_test.go", - "main_test.go", - ], + srcs = ["admission_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/plugin/pkg/admission/podtolerationrestriction/main_test.go b/plugin/pkg/admission/podtolerationrestriction/main_test.go deleted file mode 100644 index d500cb63af7..00000000000 --- a/plugin/pkg/admission/podtolerationrestriction/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package podtolerationrestriction - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/plugin/pkg/admission/priority/BUILD b/plugin/pkg/admission/priority/BUILD index 3737f0c2361..97abdf9f9c7 100644 --- a/plugin/pkg/admission/priority/BUILD +++ b/plugin/pkg/admission/priority/BUILD @@ -8,10 +8,7 @@ load( go_test( name = "go_default_test", - srcs = [ - "admission_test.go", - "main_test.go", - ], + srcs = ["admission_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/plugin/pkg/admission/priority/main_test.go b/plugin/pkg/admission/priority/main_test.go deleted file mode 100644 index c17952fd3eb..00000000000 --- a/plugin/pkg/admission/priority/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package priority - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/plugin/pkg/admission/resourcequota/BUILD b/plugin/pkg/admission/resourcequota/BUILD index 96d889890e1..99c73f482fc 100644 --- a/plugin/pkg/admission/resourcequota/BUILD +++ b/plugin/pkg/admission/resourcequota/BUILD @@ -51,10 +51,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "admission_test.go", - "main_test.go", - ], + srcs = ["admission_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/main_test.go b/plugin/pkg/admission/resourcequota/main_test.go deleted file mode 100644 index a9d7da30438..00000000000 --- a/plugin/pkg/admission/resourcequota/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package resourcequota - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/plugin/pkg/admission/storage/persistentvolume/label/BUILD b/plugin/pkg/admission/storage/persistentvolume/label/BUILD index 35b7366258d..d93e3e75c1f 100644 --- a/plugin/pkg/admission/storage/persistentvolume/label/BUILD +++ b/plugin/pkg/admission/storage/persistentvolume/label/BUILD @@ -32,10 +32,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "admission_test.go", - "main_test.go", - ], + srcs = ["admission_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/plugin/pkg/admission/storage/persistentvolume/label/main_test.go b/plugin/pkg/admission/storage/persistentvolume/label/main_test.go deleted file mode 100644 index 60a5d5020e5..00000000000 --- a/plugin/pkg/admission/storage/persistentvolume/label/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package label - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/plugin/pkg/admission/storage/storageobjectinuseprotection/BUILD b/plugin/pkg/admission/storage/storageobjectinuseprotection/BUILD index 6073526d6bc..712b6555d56 100644 --- a/plugin/pkg/admission/storage/storageobjectinuseprotection/BUILD +++ b/plugin/pkg/admission/storage/storageobjectinuseprotection/BUILD @@ -17,10 +17,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "admission_test.go", - "main_test.go", - ], + srcs = ["admission_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", diff --git a/plugin/pkg/admission/storage/storageobjectinuseprotection/main_test.go b/plugin/pkg/admission/storage/storageobjectinuseprotection/main_test.go deleted file mode 100644 index 1c7d89390a1..00000000000 --- a/plugin/pkg/admission/storage/storageobjectinuseprotection/main_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package storageobjectinuseprotection - -import ( - "testing" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" - _ "k8s.io/kubernetes/pkg/features" -) - -func TestMain(m *testing.M) { - utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -} diff --git a/plugin/pkg/auth/authorizer/node/node_authorizer.go b/plugin/pkg/auth/authorizer/node/node_authorizer.go index 757b3868611..1e541ee22a6 100644 --- a/plugin/pkg/auth/authorizer/node/node_authorizer.go +++ b/plugin/pkg/auth/authorizer/node/node_authorizer.go @@ -196,11 +196,11 @@ func (r *NodeAuthorizer) authorize(nodeName string, startingType vertexType, att ok, err := r.hasPathFrom(nodeName, startingType, attrs.GetNamespace(), attrs.GetName()) if err != nil { klog.V(2).Infof("NODE DENY: %v", err) - return authorizer.DecisionNoOpinion, "no path found to object", nil + return authorizer.DecisionNoOpinion, fmt.Sprintf("no relationship found between node %q and this object", nodeName), nil } if !ok { klog.V(2).Infof("NODE DENY: %q %#v", nodeName, attrs) - return authorizer.DecisionNoOpinion, "no path found to object", nil + return authorizer.DecisionNoOpinion, fmt.Sprintf("no relationship found between node %q and this object", nodeName), nil } return authorizer.DecisionAllow, "", nil } @@ -221,11 +221,11 @@ func (r *NodeAuthorizer) authorizeCreateToken(nodeName string, startingType vert ok, err := r.hasPathFrom(nodeName, startingType, attrs.GetNamespace(), attrs.GetName()) if err != nil { klog.V(2).Infof("NODE DENY: %v", err) - return authorizer.DecisionNoOpinion, "no path found to object", nil + return authorizer.DecisionNoOpinion, fmt.Sprintf("no relationship found between node %q and this object", nodeName), nil } if !ok { klog.V(2).Infof("NODE DENY: %q %#v", nodeName, attrs) - return authorizer.DecisionNoOpinion, "no path found to object", nil + return authorizer.DecisionNoOpinion, fmt.Sprintf("no relationship 
found between node %q and this object", nodeName), nil } return authorizer.DecisionAllow, "", nil } @@ -333,7 +333,7 @@ func (r *NodeAuthorizer) hasPathFrom(nodeName string, startingType vertexType, s return found }) if !found { - return false, fmt.Errorf("node %q cannot get %s %s/%s, no path was found", nodeName, vertexTypes[startingType], startingNamespace, startingName) + return false, fmt.Errorf("node %q cannot get %s %s/%s, no relationship to this object was found in the node authorizer graph", nodeName, vertexTypes[startingType], startingNamespace, startingName) } return true, nil } diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index b20c927d0a4..71485bc9866 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -353,7 +353,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) }) } - if utilfeature.DefaultFeatureGate.Enabled(features.TokenRequest) { + if utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) { addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "root-ca-cert-publisher"}, Rules: []rbacv1.PolicyRule{ diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index 5f97d6d8308..543025b37f8 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -971,6 +971,16 @@ items: - volumeattachments verbs: - get + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - patch + - update - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole 
metadata: diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml index 4f259b16819..6d5cb73e50d 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml @@ -357,23 +357,6 @@ items: - kind: ServiceAccount name: resourcequota-controller namespace: kube-system -- apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - creationTimestamp: null - labels: - kubernetes.io/bootstrapping: rbac-defaults - name: system:controller:root-ca-cert-publisher - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:controller:root-ca-cert-publisher - subjects: - - kind: ServiceAccount - name: root-ca-cert-publisher - namespace: kube-system - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml index a853a9d9e9b..3344def7c28 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml @@ -1031,31 +1031,6 @@ items: - create - patch - update -- apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - creationTimestamp: null - labels: - kubernetes.io/bootstrapping: rbac-defaults - name: system:controller:root-ca-cert-publisher - rules: - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - update - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - update - 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/staging/src/BUILD b/staging/src/BUILD index 07cb474cafe..eb8bba577c7 100644 --- a/staging/src/BUILD +++ b/staging/src/BUILD @@ -204,6 +204,7 @@ filegroup( "//staging/src/k8s.io/code-generator/cmd/register-gen:all-srcs", "//staging/src/k8s.io/code-generator/cmd/set-gen:all-srcs", "//staging/src/k8s.io/code-generator/hack:all-srcs", + "//staging/src/k8s.io/code-generator/pkg/namer:all-srcs", "//staging/src/k8s.io/code-generator/pkg/util:all-srcs", "//staging/src/k8s.io/code-generator/third_party/forked/golang/reflect:all-srcs", "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:all-srcs", diff --git a/staging/src/k8s.io/api/Godeps/Godeps.json b/staging/src/k8s.io/api/Godeps/Godeps.json index 953c5a7e02d..e5c05264b10 100644 --- a/staging/src/k8s.io/api/Godeps/Godeps.json +++ b/staging/src/k8s.io/api/Godeps/Godeps.json @@ -28,7 +28,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/modern-go/concurrent", diff --git a/staging/src/k8s.io/api/storage/v1/generated.proto b/staging/src/k8s.io/api/storage/v1/generated.proto index 668c8544743..7ac6cb2d2ab 100644 --- a/staging/src/k8s.io/api/storage/v1/generated.proto +++ b/staging/src/k8s.io/api/storage/v1/generated.proto @@ -178,7 +178,7 @@ message VolumeError { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. 
// +optional optional string message = 2; diff --git a/staging/src/k8s.io/api/storage/v1/types.go b/staging/src/k8s.io/api/storage/v1/types.go index 9f2f67b6b76..bd60e1026b7 100644 --- a/staging/src/k8s.io/api/storage/v1/types.go +++ b/staging/src/k8s.io/api/storage/v1/types.go @@ -204,7 +204,7 @@ type VolumeError struct { Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. // +optional Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` diff --git a/staging/src/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/storage/v1/types_swagger_doc_generated.go index d4a022d52ec..e31dd7f712b 100644 --- a/staging/src/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -109,7 +109,7 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "message": "String detailing the error encountered during Attach or Detach operation. 
This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/storage/v1beta1/generated.proto b/staging/src/k8s.io/api/storage/v1beta1/generated.proto index db1f302a053..4efe7d7fea8 100644 --- a/staging/src/k8s.io/api/storage/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/storage/v1beta1/generated.proto @@ -178,7 +178,7 @@ message VolumeError { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. // +optional optional string message = 2; diff --git a/staging/src/k8s.io/api/storage/v1beta1/types.go b/staging/src/k8s.io/api/storage/v1beta1/types.go index 5702c21bcc8..a955542256e 100644 --- a/staging/src/k8s.io/api/storage/v1beta1/types.go +++ b/staging/src/k8s.io/api/storage/v1beta1/types.go @@ -204,7 +204,7 @@ type VolumeError struct { Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. 
// +optional Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` diff --git a/staging/src/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 834553e1a80..e41197bd37e 100644 --- a/staging/src/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -109,7 +109,7 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index df164ef7363..33fcdb34082 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -564,7 +564,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/mailru/easyjson/buffer", diff --git a/staging/src/k8s.io/apiextensions-apiserver/OWNERS b/staging/src/k8s.io/apiextensions-apiserver/OWNERS index f76281ad3be..6d8c639240d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/OWNERS +++ b/staging/src/k8s.io/apiextensions-apiserver/OWNERS @@ -3,6 +3,7 @@ reviewers: - sttts - enisoc - mbohlool +- yue9944882 approvers: - deads2k - lavalamp diff --git 
a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 2ad59dfe937..264a794b364 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -248,7 +248,7 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } if handler != nil { - handler = metrics.InstrumentHandlerFunc(verb, resource, subresource, scope, handler) + handler = metrics.InstrumentHandlerFunc(verb, requestInfo.APIGroup, requestInfo.APIVersion, resource, subresource, scope, metrics.APIServerComponent, handler) handler(w, req) return } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/testing/testserver.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/testing/testserver.go index 3d705576f11..c9b2ab511ae 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/testing/testserver.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/testing/testserver.go @@ -150,9 +150,10 @@ func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, custo return result, fmt.Errorf("failed to create server: %v", err) } + errCh := make(chan error) go func(stopCh <-chan struct{}) { if err := server.GenericAPIServer.PrepareRun().Run(stopCh); err != nil { - t.Errorf("apiextensions-apiserver failed run: %v", err) + errCh <- err } }(stopCh) @@ -163,6 +164,12 @@ func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, custo return result, fmt.Errorf("failed to create a client: %v", err) } err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) { + select { + case err := <-errCh: + return false, err + default: + } + result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do() status := 0 result.StatusCode(&status) diff --git 
a/staging/src/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go index 8af73d2e196..0ed34211eab 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go @@ -49,7 +49,7 @@ const ( ) func init() { - utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates) + utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates) } // defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go index b56d3950e7d..c27945fe7af 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go @@ -127,7 +127,7 @@ func TestInternalVersionIsHandlerVersion(t *testing.T) { } } -func TestVersionedNamspacedScopedCRD(t *testing.T) { +func TestVersionedNamespacedScopedCRD(t *testing.T) { tearDown, apiExtensionClient, dynamicClient, err := fixtures.StartDefaultServerWithClients(t) if err != nil { t.Fatal(err) diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index c9bd57586b8..2e2d4a62dd7 100644 --- a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -84,7 +84,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/modern-go/concurrent", diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go index c70b3d2b6c7..3425055f6ec 100644 --- 
a/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go @@ -158,6 +158,19 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { // objectSliceType is the type of a slice of Objects var objectSliceType = reflect.TypeOf([]runtime.Object{}) +// LenList returns the length of this list or 0 if it is not a list. +func LenList(list runtime.Object) int { + itemsPtr, err := GetItemsPtr(list) + if err != nil { + return 0 + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return 0 + } + return items.Len() +} + // SetList sets the given list object's Items member have the elements given in // objects. // Returns an error if list is not a List type (does not have an Items member), diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD index 267bccc4aa9..e05c8d4eb0b 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -25,6 +25,9 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go index 2eaabf0794f..babe8a8b53b 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -48,3 +48,13 @@ func (d *Duration) UnmarshalJSON(b []byte) error { func (d Duration) MarshalJSON() ([]byte, error) { return 
json.Marshal(d.Duration.String()) } + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Duration) OpenAPISchemaFormat() string { return "" } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index d845d7b0fff..604129ea101 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -232,3 +232,15 @@ func HasObjectMetaSystemFieldValues(meta Object) bool { return !meta.GetCreationTimestamp().Time.IsZero() || len(meta.GetUID()) != 0 } + +// ResetObjectMetaForStatus forces the meta fields for a status update to match the meta fields +// for a pre-existing object. This is opt-in for new objects with Status subresource. 
+func ResetObjectMetaForStatus(meta, existingMeta Object) { + meta.SetDeletionTimestamp(existingMeta.GetDeletionTimestamp()) + meta.SetGeneration(existingMeta.GetGeneration()) + meta.SetSelfLink(existingMeta.GetSelfLink()) + meta.SetLabels(existingMeta.GetLabels()) + meta.SetAnnotations(existingMeta.GetAnnotations()) + meta.SetFinalizers(existingMeta.GetFinalizers()) + meta.SetOwnerReferences(existingMeta.GetOwnerReferences()) +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/helpers_test.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/helpers_test.go index fa42493002d..656e53af22c 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/helpers_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/helpers_test.go @@ -22,7 +22,11 @@ import ( "strings" "testing" + "github.com/google/gofuzz" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/diff" ) func TestLabelSelectorAsSelector(t *testing.T) { @@ -159,3 +163,36 @@ func TestLabelSelectorAsMap(t *testing.T) { } } } + +func TestResetObjectMetaForStatus(t *testing.T) { + meta := &ObjectMeta{} + existingMeta := &ObjectMeta{} + + // fuzz the existingMeta to set every field, no nils + f := fuzz.New().NilChance(0).NumElements(1, 1) + f.Fuzz(existingMeta) + ResetObjectMetaForStatus(meta, existingMeta) + + // not all fields are stomped during the reset. These fields should not have been set. False + // set them all to their zero values. Before you add anything to this list, consider whether or not + // you're enforcing immutability (those are fine) and whether /status should be able to update + // these values (these are usually not fine). 
+ + // generateName doesn't do anything after create + existingMeta.SetGenerateName("") + // resourceVersion is enforced in validation and used during the storage update + existingMeta.SetResourceVersion("") + // fields made immutable in validation + existingMeta.SetUID(types.UID("")) + existingMeta.SetName("") + existingMeta.SetNamespace("") + existingMeta.SetClusterName("") + existingMeta.SetCreationTimestamp(Time{}) + existingMeta.SetDeletionTimestamp(nil) + existingMeta.SetDeletionGracePeriodSeconds(nil) + existingMeta.SetInitializers(nil) + + if !reflect.DeepEqual(meta, existingMeta) { + t.Error(diff.ObjectDiff(meta, existingMeta)) + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index fc138e75aa9..75ac693fe48 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -47,6 +47,9 @@ func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{ var val interface{} = obj for i, field := range fields { + if val == nil { + return nil, false, nil + } if m, ok := val.(map[string]interface{}); ok { val, ok = m[field] if !ok { diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers_test.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers_test.go index d979962119b..529acab6bac 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers_test.go @@ -95,6 +95,19 @@ func TestNestedFieldNoCopy(t *testing.T) { assert.False(t, exists) assert.Nil(t, err) assert.Nil(t, res) + + // case 5: intermediate field does not exist + res, exists, err = NestedFieldNoCopy(obj, "a", "e", "f") + assert.False(t, exists) + assert.Nil(t, err) + assert.Nil(t, res) + + // case 6: intermediate field 
is null + // (background: happens easily in YAML) + res, exists, err = NestedFieldNoCopy(obj, "a", "c", "f") + assert.False(t, exists) + assert.Nil(t, err) + assert.Nil(t, res) } func TestNestedFieldCopy(t *testing.T) { diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index 91fd4ed4f0b..a60a7c04156 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -64,7 +64,7 @@ func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder { reader: r, decoder: d, buf: make([]byte, 1024), - maxBytes: 1024 * 1024, + maxBytes: 16 * 1024 * 1024, } } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming_test.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming_test.go index 9cae6a32c43..1721423accc 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming_test.go @@ -51,7 +51,7 @@ func TestDecoder(t *testing.T) { frames := [][]byte{ make([]byte, 1025), make([]byte, 1024*5), - make([]byte, 1024*1024*5), + make([]byte, 1024*1024*17), make([]byte, 1025), } pr, pw := io.Pipe() diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD index 785dc5fcfe4..9ba9d16f5a4 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD @@ -8,12 +8,17 @@ load( go_test( name = "go_default_test", - srcs = ["versioning_test.go"], + srcs = [ + "versioning_test.go", + "versioning_unstructured_test.go", + ], embed = [":go_default_library"], deps = [ + 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_unstructured_test.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_unstructured_test.go new file mode 100644 index 00000000000..e47a259f69d --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_unstructured_test.go @@ -0,0 +1,338 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package versioning + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func buildUnstructuredDecodable(gvk schema.GroupVersionKind) runtime.Object { + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(gvk) + return obj +} + +func buildUnstructuredListDecodable(gvk schema.GroupVersionKind) runtime.Object { + obj := &unstructured.UnstructuredList{} + obj.SetGroupVersionKind(gvk) + return obj +} + +func TestEncodeUnstructured(t *testing.T) { + v1GVK := schema.GroupVersionKind{ + Group: "crispy", + Version: "v1", + Kind: "Noxu", + } + v2GVK := schema.GroupVersionKind{ + Group: "crispy", + Version: "v2", + Kind: "Noxu", + } + elseGVK := schema.GroupVersionKind{ + Group: "crispy2", + Version: "else", + Kind: "Noxu", + } + elseUnstructuredDecodable := buildUnstructuredDecodable(elseGVK) + elseUnstructuredDecodableList := buildUnstructuredListDecodable(elseGVK) + v1UnstructuredDecodable := buildUnstructuredDecodable(v1GVK) + v1UnstructuredDecodableList := buildUnstructuredListDecodable(v1GVK) + v2UnstructuredDecodable := buildUnstructuredDecodable(v2GVK) + + testCases := []struct { + name string + convertor runtime.ObjectConvertor + targetVersion runtime.GroupVersioner + outObj runtime.Object + typer runtime.ObjectTyper + + errFunc func(error) bool + expectedObj runtime.Object + }{ + { + name: "encode v1 unstructured with v2 encode version", + typer: &mockTyper{ + gvks: []schema.GroupVersionKind{v1GVK}, + }, + outObj: v1UnstructuredDecodable, + targetVersion: v2GVK.GroupVersion(), + convertor: &checkConvertor{ + obj: v2UnstructuredDecodable, + groupVersion: v2GVK.GroupVersion(), + }, + expectedObj: v2UnstructuredDecodable, + }, + { + name: "both typer and conversion are bypassed when unstructured gvk matches encode gvk", + typer: &mockTyper{ + err: fmt.Errorf("unexpected typer 
call"), + }, + outObj: v1UnstructuredDecodable, + targetVersion: v1GVK.GroupVersion(), + convertor: &checkConvertor{ + err: fmt.Errorf("unexpected conversion happened"), + }, + expectedObj: v1UnstructuredDecodable, + }, + { + name: "encode will fail when unstructured object's gvk and encode gvk mismatches", + outObj: elseUnstructuredDecodable, + targetVersion: v1GVK.GroupVersion(), + errFunc: func(err error) bool { + return assert.Equal(t, runtime.NewNotRegisteredGVKErrForTarget("noxu-scheme", elseGVK, v1GVK.GroupVersion()), err) + }, + }, + { + name: "encode with unstructured list's gvk regardless of its elements' gvk", + outObj: elseUnstructuredDecodableList, + targetVersion: elseGVK.GroupVersion(), + }, + { + name: "typer fail to recognize unstructured object gvk will fail the encoding", + outObj: elseUnstructuredDecodable, + targetVersion: v1GVK.GroupVersion(), + typer: &mockTyper{ + err: fmt.Errorf("invalid obj gvk"), + }, + }, + { + name: "encoding unstructured object without encode version will fallback to typer suggested version", + targetVersion: v1GVK.GroupVersion(), + convertor: &checkConvertor{ + obj: v1UnstructuredDecodableList, + groupVersion: v1GVK.GroupVersion(), + }, + outObj: elseUnstructuredDecodable, + typer: &mockTyper{ + gvks: []schema.GroupVersionKind{v1GVK}, + }, + }, + } + for _, testCase := range testCases { + serializer := &mockSerializer{} + codec := NewCodec(serializer, serializer, testCase.convertor, nil, testCase.typer, nil, testCase.targetVersion, nil, "noxu-scheme") + err := codec.Encode(testCase.outObj, ioutil.Discard) + if testCase.errFunc != nil { + if !testCase.errFunc(err) { + t.Errorf("%v: failed: %v", testCase.name, err) + } + return + } + assert.NoError(t, err) + assert.Equal(t, testCase.expectedObj, serializer.obj) + } +} + +type errNotRecognizedGVK struct { + failedGVK schema.GroupVersionKind + claimingGVKs []schema.GroupVersionKind +} + +func (e errNotRecognizedGVK) Error() string { + return fmt.Sprintf("unrecognized gvk 
%v, should be one of %v", e.failedGVK, e.claimingGVKs) +} + +type mockUnstructuredNopConvertor struct { + claimingGVKs []schema.GroupVersionKind +} + +func (c *mockUnstructuredNopConvertor) recognizeGVK(gvkToCheck schema.GroupVersionKind) error { + matched := false + for _, gvk := range c.claimingGVKs { + if gvk == gvkToCheck { + matched = true + } + } + if !matched { + return errNotRecognizedGVK{ + failedGVK: gvkToCheck, + claimingGVKs: c.claimingGVKs, + } + } + return nil +} + +func (c *mockUnstructuredNopConvertor) Convert(in, out, context interface{}) error { + inObj := in.(*unstructured.Unstructured) + outObj := out.(*unstructured.Unstructured) + if err := c.recognizeGVK(outObj.GroupVersionKind()); err != nil { + return err + } + outGVK := outObj.GetObjectKind().GroupVersionKind() + *outObj = *inObj.DeepCopy() + outObj.GetObjectKind().SetGroupVersionKind(outGVK) + return nil +} + +func (c *mockUnstructuredNopConvertor) ConvertToVersion(in runtime.Object, outVersion runtime.GroupVersioner) (runtime.Object, error) { + out := in.DeepCopyObject() + targetGVK, matched := outVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{in.GetObjectKind().GroupVersionKind()}) + if !matched { + return nil, fmt.Errorf("attempt to convert to mismatched gv %v", outVersion) + } + if err := c.recognizeGVK(out.GetObjectKind().GroupVersionKind()); err != nil { + return nil, err + } + out.GetObjectKind().SetGroupVersionKind(targetGVK) + return out, nil +} + +func (c *mockUnstructuredNopConvertor) ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error) { + return "", "", fmt.Errorf("unexpected call to ConvertFieldLabel") +} + +func TestDecodeUnstructured(t *testing.T) { + internalGVK := schema.GroupVersionKind{ + Group: "crispy", + Version: runtime.APIVersionInternal, + Kind: "Noxu", + } + v1GVK := schema.GroupVersionKind{ + Group: "crispy", + Version: "v1", + Kind: "Noxu", + } + v2GVK := schema.GroupVersionKind{ + Group: "crispy", + Version: 
"v2", + Kind: "Noxu", + } + internalUnstructuredDecodable := buildUnstructuredDecodable(internalGVK) + v1UnstructuredDecodable := buildUnstructuredDecodable(v1GVK) + v2UnstructuredDecodable := buildUnstructuredDecodable(v2GVK) + + testCases := []struct { + name string + serializer runtime.Serializer + convertor runtime.ObjectConvertor + suggestedConvertVersion runtime.GroupVersioner + defaultGVK *schema.GroupVersionKind + intoObj runtime.Object + + errFunc func(error) bool + expectedGVKOfSerializedData *schema.GroupVersionKind + expectedOut runtime.Object + }{ + { + name: "decode v1 unstructured into non-nil v2 unstructured", + serializer: &mockSerializer{actual: &v1GVK, obj: v1UnstructuredDecodable}, + convertor: &mockUnstructuredNopConvertor{ + claimingGVKs: []schema.GroupVersionKind{ + v1GVK, v2GVK, + }, + }, + suggestedConvertVersion: v2GVK.GroupVersion(), + intoObj: v2UnstructuredDecodable, + expectedGVKOfSerializedData: &v1GVK, + expectedOut: v2UnstructuredDecodable, + }, + { + name: "decode v1 unstructured into nil object with v2 version", + serializer: &mockSerializer{actual: &v1GVK, obj: v1UnstructuredDecodable}, + convertor: &mockUnstructuredNopConvertor{ + claimingGVKs: []schema.GroupVersionKind{ + v1GVK, v2GVK, + }, + }, + suggestedConvertVersion: v2GVK.GroupVersion(), + intoObj: nil, + expectedGVKOfSerializedData: &v1GVK, + expectedOut: v2UnstructuredDecodable, + }, + { + name: "decode v1 unstructured into non-nil internal unstructured", + serializer: &mockSerializer{actual: &v1GVK, obj: v1UnstructuredDecodable}, + convertor: &mockUnstructuredNopConvertor{ + claimingGVKs: []schema.GroupVersionKind{ + v1GVK, v2GVK, + }, + }, + suggestedConvertVersion: internalGVK.GroupVersion(), + intoObj: internalUnstructuredDecodable, + errFunc: func(err error) bool { + notRecognized, ok := err.(errNotRecognizedGVK) + if !ok { + return false + } + return assert.Equal(t, notRecognized.failedGVK, internalGVK) + }, + }, + { + name: "decode v1 unstructured into nil object 
with internal version", + serializer: &mockSerializer{actual: &v1GVK, obj: v1UnstructuredDecodable}, + convertor: &mockUnstructuredNopConvertor{ + claimingGVKs: []schema.GroupVersionKind{ + v1GVK, v2GVK, + }, + }, + suggestedConvertVersion: internalGVK.GroupVersion(), + intoObj: nil, + errFunc: func(err error) bool { + notRecognized, ok := err.(errNotRecognizedGVK) + if !ok { + return false + } + return assert.Equal(t, notRecognized.failedGVK, internalGVK) + }, + }, + { + name: "skip conversion if serializer returns the same unstructured as into", + serializer: &mockSerializer{actual: &v1GVK, obj: v1UnstructuredDecodable}, + convertor: &checkConvertor{ + err: fmt.Errorf("unexpected conversion happened"), + }, + suggestedConvertVersion: internalGVK.GroupVersion(), + intoObj: v1UnstructuredDecodable, + expectedGVKOfSerializedData: &v1GVK, + expectedOut: v1UnstructuredDecodable, + }, + { + name: "invalid convert version makes decoding unstructured fail", + serializer: &mockSerializer{actual: &v1GVK, obj: v1UnstructuredDecodable}, + convertor: &checkConvertor{ + in: v1UnstructuredDecodable, + groupVersion: internalGVK.GroupVersion(), + err: fmt.Errorf("no matching decode version"), + }, + suggestedConvertVersion: internalGVK.GroupVersion(), + errFunc: func(err error) bool { + return assert.Equal(t, err, fmt.Errorf("no matching decode version")) + }, + }, + } + for _, testCase := range testCases { + codec := NewCodec(testCase.serializer, testCase.serializer, testCase.convertor, nil, nil, nil, nil, testCase.suggestedConvertVersion, "noxu-scheme") + actualObj, actualSerializedGVK, err := codec.Decode([]byte(`{}`), testCase.defaultGVK, testCase.intoObj) + if testCase.errFunc != nil { + if !testCase.errFunc(err) { + t.Errorf("%v: failed: %v", testCase.name, err) + } + return + } + assert.NoError(t, err) + assert.Equal(t, testCase.expectedOut, actualObj, "%v failed", testCase.name) + assert.Equal(t, testCase.expectedGVKOfSerializedData, actualSerializedGVK, "%v failed", 
testCase.name) + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go index 596b1888975..3c8e09399f5 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go @@ -17,6 +17,7 @@ limitations under the License. package proxy import ( + "bufio" "bytes" "context" "fmt" @@ -271,6 +272,18 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques } defer backendConn.Close() + // determine the http response code from the backend by reading from rawResponse+backendConn + backendHTTPResponse, headerBytes, err := getResponse(io.MultiReader(bytes.NewReader(rawResponse), backendConn)) + if err != nil { + klog.V(6).Infof("Proxy connection error: %v", err) + h.Responder.Error(w, req, err) + return true + } + if len(headerBytes) > len(rawResponse) { + // we read beyond the bytes stored in rawResponse, update rawResponse to the full set of bytes read from the backend + rawResponse = headerBytes + } + // Once the connection is hijacked, the ErrorResponder will no longer work, so // hijacking should be the last step in the upgrade. requestHijacker, ok := w.(http.Hijacker) @@ -287,6 +300,22 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques } defer requestHijackedConn.Close() + if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols { + // If the backend did not upgrade the request, echo the response from the backend to the client and return, closing the connection. 
+ klog.V(6).Infof("Proxy upgrade error, status code %d", backendHTTPResponse.StatusCode) + // set read/write deadlines + deadline := time.Now().Add(10 * time.Second) + backendConn.SetReadDeadline(deadline) + requestHijackedConn.SetWriteDeadline(deadline) + // write the response to the client + err := backendHTTPResponse.Write(requestHijackedConn) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + klog.Errorf("Error proxying data from backend to client: %v", err) + } + // Indicate we handled the request + return true + } + // Forward raw response bytes back to client. if len(rawResponse) > 0 { klog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) @@ -356,6 +385,19 @@ func (h *UpgradeAwareHandler) DialForUpgrade(req *http.Request) (net.Conn, error return dial(updatedReq, h.UpgradeTransport) } +// getResponse reads an http response from the given reader, returns the response, +// the bytes read from the reader, and any error encountered +func getResponse(r io.Reader) (*http.Response, []byte, error) { + rawResponse := bytes.NewBuffer(make([]byte, 0, 256)) + // Save the bytes read while reading the response headers into the rawResponse buffer + resp, err := http.ReadResponse(bufio.NewReader(io.TeeReader(r, rawResponse)), nil) + if err != nil { + return nil, nil, err + } + // return the http response and the raw bytes consumed from the reader in the process + return resp, rawResponse.Bytes(), nil +} + // dial dials the backend at req.URL and writes req to it. 
func dial(req *http.Request, transport http.RoundTripper) (net.Conn, error) { conn, err := DialURL(req.Context(), req.URL, transport) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/wait/wait.go b/staging/src/k8s.io/apimachinery/pkg/util/wait/wait.go index ca61168cd4a..590c17b4c59 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -173,10 +173,49 @@ type ConditionFunc func() (done bool, err error) // Backoff holds parameters applied to a Backoff function. type Backoff struct { - Duration time.Duration // the base duration - Factor float64 // Duration is multiplied by factor each iteration - Jitter float64 // The amount of jitter applied each iteration - Steps int // Exit with error after this many steps + // The initial duration. + Duration time.Duration + // Duration is multiplied by factor each iteration. Must be greater + // than or equal to zero. + Factor float64 + // The amount of jitter applied each iteration. Jitter is applied after + // cap. + Jitter float64 + // The number of steps before duration stops changing. If zero, initial + // duration is always used. Used for exponential backoff in combination + // with Factor. + Steps int + // The returned duration will never be greater than cap *before* jitter + // is applied. The actual maximum cap is `cap * (1.0 + jitter)`. + Cap time.Duration +} + +// Step returns the next interval in the exponential backoff. This method +// will mutate the provided backoff. 
+func (b *Backoff) Step() time.Duration { + if b.Steps < 1 { + if b.Jitter > 0 { + return Jitter(b.Duration, b.Jitter) + } + return b.Duration + } + b.Steps-- + + duration := b.Duration + + // calculate the next step + if b.Factor != 0 { + b.Duration = time.Duration(float64(b.Duration) * b.Factor) + if b.Cap > 0 && b.Duration > b.Cap { + b.Duration = b.Cap + b.Steps = 0 + } + } + + if b.Jitter > 0 { + duration = Jitter(duration, b.Jitter) + } + return duration } // ExponentialBackoff repeats a condition check with exponential backoff. @@ -190,19 +229,14 @@ type Backoff struct { // If the condition never returns true, ErrWaitTimeout is returned. All other // errors terminate immediately. func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - duration := backoff.Duration - for i := 0; i < backoff.Steps; i++ { - if i != 0 { - adjusted := duration - if backoff.Jitter > 0.0 { - adjusted = Jitter(duration, backoff.Jitter) - } - time.Sleep(adjusted) - duration = time.Duration(float64(duration) * backoff.Factor) - } + for backoff.Steps > 0 { if ok, err := condition(); err != nil || ok { return err } + if backoff.Steps == 1 { + break + } + time.Sleep(backoff.Step()) } return ErrWaitTimeout } diff --git a/staging/src/k8s.io/apimachinery/pkg/util/wait/wait_test.go b/staging/src/k8s.io/apimachinery/pkg/util/wait/wait_test.go index 2dfd2877756..24073bb1922 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/wait/wait_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/wait/wait_test.go @@ -19,6 +19,7 @@ package wait import ( "errors" "fmt" + "math/rand" "sync" "sync/atomic" "testing" @@ -499,3 +500,47 @@ func TestPollUntil(t *testing.T) { // make sure we finished the poll <-pollDone } + +func TestBackoff_Step(t *testing.T) { + tests := []struct { + initial *Backoff + want []time.Duration + }{ + {initial: &Backoff{Duration: time.Second, Steps: 0}, want: []time.Duration{time.Second, time.Second, time.Second}}, + {initial: &Backoff{Duration: time.Second, 
Steps: 1}, want: []time.Duration{time.Second, time.Second, time.Second}}, + {initial: &Backoff{Duration: time.Second, Factor: 1.0, Steps: 1}, want: []time.Duration{time.Second, time.Second, time.Second}}, + {initial: &Backoff{Duration: time.Second, Factor: 2, Steps: 3}, want: []time.Duration{1 * time.Second, 2 * time.Second, 4 * time.Second}}, + {initial: &Backoff{Duration: time.Second, Factor: 2, Steps: 3, Cap: 3 * time.Second}, want: []time.Duration{1 * time.Second, 2 * time.Second, 3 * time.Second}}, + {initial: &Backoff{Duration: time.Second, Factor: 2, Steps: 2, Cap: 3 * time.Second, Jitter: 0.5}, want: []time.Duration{2 * time.Second, 3 * time.Second, 3 * time.Second}}, + {initial: &Backoff{Duration: time.Second, Factor: 2, Steps: 6, Jitter: 4}, want: []time.Duration{1 * time.Second, 2 * time.Second, 4 * time.Second, 8 * time.Second, 16 * time.Second, 32 * time.Second}}, + } + for seed := int64(0); seed < 5; seed++ { + for _, tt := range tests { + initial := *tt.initial + t.Run(fmt.Sprintf("%#v seed=%d", initial, seed), func(t *testing.T) { + rand.Seed(seed) + for i := 0; i < len(tt.want); i++ { + got := initial.Step() + t.Logf("[%d]=%s", i, got) + if initial.Jitter > 0 { + if got == tt.want[i] { + // this is statistically unlikely to happen by chance + t.Errorf("Backoff.Step(%d) = %v, no jitter", i, got) + continue + } + diff := float64(tt.want[i]-got) / float64(tt.want[i]) + if diff > initial.Jitter { + t.Errorf("Backoff.Step(%d) = %v, want %v, outside range", i, got, tt.want) + continue + } + } else { + if got != tt.want[i] { + t.Errorf("Backoff.Step(%d) = %v, want %v", i, got, tt.want) + continue + } + } + } + }) + } + } +} diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 9ef4b940b32..b1176385736 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -524,7 +524,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": 
"f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/mailru/easyjson/buffer", @@ -2056,7 +2056,7 @@ }, { "ImportPath": "k8s.io/utils/pointer", - "Rev": "66066c83e385e385ccc3c964b44fd7dcd413d0ed" + "Rev": "8e7ff06bf0e2d3289061230af203e430a15b6dcc" }, { "ImportPath": "sigs.k8s.io/yaml", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go index d4d184a5747..8219b797f53 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go @@ -85,7 +85,7 @@ func (i *initializer) ValidateInitialization() error { } if !utilfeature.DefaultFeatureGate.Enabled(features.Initializers) { - if err := utilfeature.DefaultFeatureGate.Set(string(features.Initializers) + "=true"); err != nil { + if err := utilfeature.DefaultMutableFeatureGate.Set(string(features.Initializers) + "=true"); err != nil { klog.Errorf("error enabling Initializers feature as part of admission plugin setup: %v", err) } else { klog.Infof("enabled Initializers feature as part of admission plugin setup") diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go index 515d2663592..7774f849e1b 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -70,6 +70,11 @@ func createHandler(r rest.NamedCreater, scope RequestScope, admit admission.Inte ctx := req.Context() ctx = request.WithNamespace(ctx, namespace) + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, &scope) + if err != nil { + scope.err(err, w, req) + return + } gv := scope.Kind.GroupVersion() s, err := 
negotiation.NegotiateInputSerializer(req, false, scope.Serializer) @@ -77,6 +82,7 @@ func createHandler(r rest.NamedCreater, scope RequestScope, admit admission.Inte scope.err(err, w, req) return } + decoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion) body, err := readBody(req) @@ -144,17 +150,6 @@ func createHandler(r rest.NamedCreater, scope RequestScope, admit admission.Inte } trace.Step("Object stored in database") - requestInfo, ok := request.RequestInfoFrom(ctx) - if !ok { - scope.err(fmt.Errorf("missing requestInfo"), w, req) - return - } - if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - trace.Step("Self-link added") - // If the object is partially initialized, always indicate it via StatusAccepted code := http.StatusCreated if accessor, err := meta.Accessor(result); err == nil { @@ -168,7 +163,7 @@ func createHandler(r rest.NamedCreater, scope RequestScope, admit admission.Inte } scope.Trace = trace - transformResponseObject(ctx, scope, req, w, code, result) + transformResponseObject(ctx, scope, req, w, code, outputMediaType, result) } } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go index b8b4cdc2315..e38e1c2972c 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -64,6 +64,12 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestSco ae := request.AuditEventFrom(ctx) admit = admission.WithAudit(admit, ae) + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, &scope) + if err != nil { + scope.err(err, w, req) + return + } + options := &metav1.DeleteOptions{} if allowsOptions { body, err := readBody(req) @@ -160,23 +166,10 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestSco Kind: 
scope.Kind.Kind, }, } - } else { - // when a non-status response is returned, set the self link - requestInfo, ok := request.RequestInfoFrom(ctx) - if !ok { - scope.err(fmt.Errorf("missing requestInfo"), w, req) - return - } - if _, ok := result.(*metav1.Status); !ok { - if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - } } scope.Trace = trace - transformResponseObject(ctx, scope, req, w, status, result) + transformResponseObject(ctx, scope, req, w, status, outputMediaType, result) } } @@ -204,6 +197,12 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestSco ctx = request.WithNamespace(ctx, namespace) ae := request.AuditEventFrom(ctx) + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, &scope) + if err != nil { + scope.err(err, w, req) + return + } + listOptions := metainternalversion.ListOptions{} if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { err = errors.NewBadRequest(err.Error()) @@ -304,17 +303,9 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestSco Kind: scope.Kind.Kind, }, } - } else { - // when a non-status response is returned, set the self link - if _, ok := result.(*metav1.Status); !ok { - if _, err := setListSelfLink(result, ctx, req, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - } } scope.Trace = trace - transformResponseObject(ctx, scope, req, w, http.StatusOK, result) + transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result) } } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go index 0f1c59946a3..1c0950b8d10 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -33,6 +33,7 @@ import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" @@ -58,24 +59,21 @@ func getResourceHandler(scope RequestScope, getter getterFunc) http.HandlerFunc ctx := req.Context() ctx = request.WithNamespace(ctx, namespace) - result, err := getter(ctx, name, req, trace) + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, &scope) if err != nil { scope.err(err, w, req) return } - requestInfo, ok := request.RequestInfoFrom(ctx) - if !ok { - scope.err(fmt.Errorf("missing requestInfo"), w, req) - return - } - if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { + + result, err := getter(ctx, name, req, trace) + if err != nil { scope.err(err, w, req) return } trace.Step("About to write a response") scope.Trace = trace - transformResponseObject(ctx, scope, req, w, http.StatusOK, result) + transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result) trace.Step("Transformed response object") } } @@ -187,6 +185,12 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch ctx := req.Context() ctx = request.WithNamespace(ctx, namespace) + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, &scope) + if err != nil { + scope.err(err, w, req) + return + } + opts := metainternalversion.ListOptions{} if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &opts); err != nil { err = errors.NewBadRequest(err.Error()) @@ -252,7 +256,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch return } requestInfo, _ := request.RequestInfoFrom(ctx) - metrics.RecordLongRunning(req, requestInfo, func() { + metrics.RecordLongRunning(req, requestInfo, 
metrics.APIServerComponent, func() { serveWatch(watcher, scope, req, w, timeout) }) return @@ -267,22 +271,9 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch return } trace.Step("Listing from storage done") - numberOfItems, err := setListSelfLink(result, ctx, req, scope.Namer) - if err != nil { - scope.err(err, w, req) - return - } - trace.Step("Self-linking done") - // Ensure empty lists return a non-nil items slice - if numberOfItems == 0 && meta.IsListType(result) { - if err := meta.SetList(result, []runtime.Object{}); err != nil { - scope.err(err, w, req) - return - } - } scope.Trace = trace - transformResponseObject(ctx, scope, req, w, http.StatusOK, result) - trace.Step(fmt.Sprintf("Writing http response done (%d items)", numberOfItems)) + transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result) + trace.Step(fmt.Sprintf("Writing http response done (%d items)", meta.LenList(result))) } } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index df9c38d165f..8ca8b47f856 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -88,6 +88,12 @@ func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface ctx := req.Context() ctx = request.WithNamespace(ctx, namespace) + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, &scope) + if err != nil { + scope.err(err, w, req) + return + } + patchJS, err := readBody(req) if err != nil { scope.err(err, w, req) @@ -190,19 +196,8 @@ func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface } trace.Step("Object stored in database") - requestInfo, ok := request.RequestInfoFrom(ctx) - if !ok { - scope.err(fmt.Errorf("missing requestInfo"), w, req) - return - } - if err := setSelfLink(result, requestInfo, scope.Namer); 
err != nil { - scope.err(err, w, req) - return - } - trace.Step("Self-link added") - scope.Trace = trace - transformResponseObject(ctx, scope, req, w, http.StatusOK, result) + transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result) } } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go index e140c081746..f9f363840f7 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go @@ -27,160 +27,85 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" ) // transformResponseObject takes an object loaded from storage and performs any necessary transformations. // Will write the complete response object. -func transformResponseObject(ctx context.Context, scope RequestScope, req *http.Request, w http.ResponseWriter, statusCode int, result runtime.Object) { - // TODO: fetch the media type much earlier in request processing and pass it into this method. 
- trace := scope.Trace - mediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, &scope) - if err != nil { - status := responsewriters.ErrorToAPIStatus(err) - trace.Step("Writing raw JSON response") - responsewriters.WriteRawJSON(int(status.Code), status, w) +func transformResponseObject(ctx context.Context, scope RequestScope, req *http.Request, w http.ResponseWriter, statusCode int, mediaType negotiation.MediaTypeOptions, result runtime.Object) { + // status objects are ignored for transformation + if _, ok := result.(*metav1.Status); ok { + responsewriters.WriteObject(statusCode, scope.Kind.GroupVersion(), scope.Serializer, result, w, req) return } - // If conversion was allowed by the scope, perform it before writing the response - if target := mediaType.Convert; target != nil { - switch { - - case target.Kind == "PartialObjectMetadata" && target.GroupVersion() == metav1beta1.SchemeGroupVersion: - if meta.IsListType(result) { - // TODO: this should be calculated earlier - err = newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadata, but the requested object is a list (%T)", result)) - scope.err(err, w, req) - return - } - m, err := meta.Accessor(result) - if err != nil { - scope.err(err, w, req) - return - } - partial := meta.AsPartialObjectMetadata(m) - partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) - - // renegotiate under the internal version - _, info, err := negotiation.NegotiateOutputMediaType(req, metainternalversion.Codecs, &scope) - if err != nil { - scope.err(err, w, req) - return - } - encoder := metainternalversion.Codecs.EncoderForVersion(info.Serializer, metav1beta1.SchemeGroupVersion) - trace.Step(fmt.Sprintf("Serializing response as type %s", info.MediaType)) - responsewriters.SerializeObject(info.MediaType, encoder, w, req, statusCode, partial) - return - - case target.Kind == "PartialObjectMetadataList" && target.GroupVersion() == 
metav1beta1.SchemeGroupVersion: - if !meta.IsListType(result) { - // TODO: this should be calculated earlier - err = newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadataList, but the requested object is not a list (%T)", result)) - scope.err(err, w, req) - return - } - list := &metav1beta1.PartialObjectMetadataList{} - trace.Step("Processing list items") - err := meta.EachListItem(result, func(obj runtime.Object) error { - m, err := meta.Accessor(obj) - if err != nil { - return err - } - partial := meta.AsPartialObjectMetadata(m) - partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) - list.Items = append(list.Items, partial) - return nil - }) - if err != nil { - scope.err(err, w, req) - return - } - - // renegotiate under the internal version - _, info, err := negotiation.NegotiateOutputMediaType(req, metainternalversion.Codecs, &scope) - if err != nil { - scope.err(err, w, req) - return - } - encoder := metainternalversion.Codecs.EncoderForVersion(info.Serializer, metav1beta1.SchemeGroupVersion) - trace.Step(fmt.Sprintf("Serializing response as type %s", info.MediaType)) - responsewriters.SerializeObject(info.MediaType, encoder, w, req, statusCode, list) - return - - case target.Kind == "Table" && target.GroupVersion() == metav1beta1.SchemeGroupVersion: - // TODO: relax the version abstraction - // TODO: skip if this is a status response (delete without body)? 
- - opts := &metav1beta1.TableOptions{} - trace.Step("Decoding parameters") - if err := metav1beta1.ParameterCodec.DecodeParameters(req.URL.Query(), metav1beta1.SchemeGroupVersion, opts); err != nil { - scope.err(err, w, req) - return - } - - trace.Step("Converting to table") - table, err := scope.TableConvertor.ConvertToTable(ctx, result, opts) - if err != nil { - scope.err(err, w, req) - return - } - - trace.Step("Processing rows") - for i := range table.Rows { - item := &table.Rows[i] - switch opts.IncludeObject { - case metav1beta1.IncludeObject: - item.Object.Object, err = scope.Convertor.ConvertToVersion(item.Object.Object, scope.Kind.GroupVersion()) - if err != nil { - scope.err(err, w, req) - return - } - // TODO: rely on defaulting for the value here? - case metav1beta1.IncludeMetadata, "": - m, err := meta.Accessor(item.Object.Object) - if err != nil { - scope.err(err, w, req) - return - } - // TODO: turn this into an internal type and do conversion in order to get object kind automatically set? - partial := meta.AsPartialObjectMetadata(m) - partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) - item.Object.Object = partial - case metav1beta1.IncludeNone: - item.Object.Object = nil - default: - // TODO: move this to validation on the table options? 
- err = errors.NewBadRequest(fmt.Sprintf("unrecognized includeObject value: %q", opts.IncludeObject)) - scope.err(err, w, req) - } - } - - // renegotiate under the internal version - _, info, err := negotiation.NegotiateOutputMediaType(req, metainternalversion.Codecs, &scope) - if err != nil { - scope.err(err, w, req) - return - } - encoder := metainternalversion.Codecs.EncoderForVersion(info.Serializer, metav1beta1.SchemeGroupVersion) - trace.Step(fmt.Sprintf("Serializing response as type %s", info.MediaType)) - responsewriters.SerializeObject(info.MediaType, encoder, w, req, statusCode, table) - return - - default: - // this block should only be hit if scope AllowsConversion is incorrect - accepted, _ := negotiation.MediaTypesForSerializer(metainternalversion.Codecs) - err := negotiation.NewNotAcceptableError(accepted) - status := responsewriters.ErrorToAPIStatus(err) - trace.Step("Writing raw JSON response") - responsewriters.WriteRawJSON(int(status.Code), status, w) - return - } + // ensure the self link and empty list array are set + if err := setObjectSelfLink(ctx, result, req, scope.Namer); err != nil { + scope.err(err, w, req) + return } - trace.Step("Writing response") - responsewriters.WriteObject(statusCode, scope.Kind.GroupVersion(), scope.Serializer, result, w, req) + trace := scope.Trace + + // If conversion was allowed by the scope, perform it before writing the response + switch target := mediaType.Convert; { + + case target == nil: + trace.Step("Writing response") + responsewriters.WriteObject(statusCode, scope.Kind.GroupVersion(), scope.Serializer, result, w, req) + + case target.Kind == "PartialObjectMetadata" && target.GroupVersion() == metav1beta1.SchemeGroupVersion: + partial, err := asV1Beta1PartialObjectMetadata(result) + if err != nil { + scope.err(err, w, req) + return + } + + if err := writeMetaInternalVersion(partial, statusCode, w, req, &scope, target.GroupVersion()); err != nil { + scope.err(err, w, req) + return + } + + case 
target.Kind == "PartialObjectMetadataList" && target.GroupVersion() == metav1beta1.SchemeGroupVersion: + trace.Step("Processing list items") + partial, err := asV1Beta1PartialObjectMetadataList(result) + if err != nil { + scope.err(err, w, req) + return + } + + if err := writeMetaInternalVersion(partial, statusCode, w, req, &scope, target.GroupVersion()); err != nil { + scope.err(err, w, req) + return + } + + case target.Kind == "Table" && target.GroupVersion() == metav1beta1.SchemeGroupVersion: + opts := &metav1beta1.TableOptions{} + trace.Step("Decoding parameters") + if err := metav1beta1.ParameterCodec.DecodeParameters(req.URL.Query(), metav1beta1.SchemeGroupVersion, opts); err != nil { + scope.err(err, w, req) + return + } + + table, err := asV1Beta1Table(ctx, result, opts, scope) + if err != nil { + scope.err(err, w, req) + return + } + + if err := writeMetaInternalVersion(table, statusCode, w, req, &scope, target.GroupVersion()); err != nil { + scope.err(err, w, req) + return + } + + default: + // this block should only be hit if scope AllowsConversion is incorrect + accepted, _ := negotiation.MediaTypesForSerializer(metainternalversion.Codecs) + err := negotiation.NewNotAcceptableError(accepted) + scope.err(err, w, req) + } } // errNotAcceptable indicates Accept negotiation has failed @@ -204,3 +129,91 @@ func (e errNotAcceptable) Status() metav1.Status { Message: e.Error(), } } + +func asV1Beta1Table(ctx context.Context, result runtime.Object, opts *metav1beta1.TableOptions, scope RequestScope) (runtime.Object, error) { + trace := scope.Trace + + trace.Step("Converting to table") + table, err := scope.TableConvertor.ConvertToTable(ctx, result, opts) + if err != nil { + return nil, err + } + + trace.Step("Processing rows") + for i := range table.Rows { + item := &table.Rows[i] + switch opts.IncludeObject { + case metav1beta1.IncludeObject: + item.Object.Object, err = scope.Convertor.ConvertToVersion(item.Object.Object, scope.Kind.GroupVersion()) + if err != 
nil { + return nil, err + } + // TODO: rely on defaulting for the value here? + case metav1beta1.IncludeMetadata, "": + m, err := meta.Accessor(item.Object.Object) + if err != nil { + return nil, err + } + // TODO: turn this into an internal type and do conversion in order to get object kind automatically set? + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) + item.Object.Object = partial + case metav1beta1.IncludeNone: + item.Object.Object = nil + default: + // TODO: move this to validation on the table options? + err = errors.NewBadRequest(fmt.Sprintf("unrecognized includeObject value: %q", opts.IncludeObject)) + return nil, err + } + } + + return table, nil +} + +func asV1Beta1PartialObjectMetadata(result runtime.Object) (runtime.Object, error) { + if meta.IsListType(result) { + // TODO: this should be calculated earlier + err := newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadata, but the requested object is a list (%T)", result)) + return nil, err + } + m, err := meta.Accessor(result) + if err != nil { + return nil, err + } + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) + return partial, nil +} + +func asV1Beta1PartialObjectMetadataList(result runtime.Object) (runtime.Object, error) { + if !meta.IsListType(result) { + // TODO: this should be calculated earlier + return nil, newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadataList, but the requested object is not a list (%T)", result)) + } + list := &metav1beta1.PartialObjectMetadataList{} + err := meta.EachListItem(result, func(obj runtime.Object) error { + m, err := meta.Accessor(obj) + if err != nil { + return err + } + partial := meta.AsPartialObjectMetadata(m) + 
partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) + list.Items = append(list.Items, partial) + return nil + }) + if err != nil { + return nil, err + } + return list, nil +} + +func writeMetaInternalVersion(obj runtime.Object, statusCode int, w http.ResponseWriter, req *http.Request, restrictions negotiation.EndpointRestrictions, target schema.GroupVersion) error { + // renegotiate under the internal version + _, info, err := negotiation.NegotiateOutputMediaType(req, metainternalversion.Codecs, restrictions) + if err != nil { + return err + } + encoder := metainternalversion.Codecs.EncoderForVersion(info.Serializer, target) + responsewriters.SerializeObject(info.MediaType, encoder, w, req, statusCode, obj) + return nil +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go index fd335b5d7e5..4f4ef920f97 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go @@ -64,7 +64,7 @@ func WriteObject(statusCode int, gv schema.GroupVersion, s runtime.NegotiatedSer stream, ok := object.(rest.ResourceStreamer) if ok { requestInfo, _ := request.RequestInfoFrom(req.Context()) - metrics.RecordLongRunning(req, requestInfo, func() { + metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { StreamObject(statusCode, gv, s, stream, w, req) }) return diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index 56da2271838..f0d2216a309 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -144,7 +144,7 @@ func ConnectResource(connecter rest.Connecter, scope RequestScope, admit admissi } } requestInfo, 
_ := request.RequestInfoFrom(ctx) - metrics.RecordLongRunning(req, requestInfo, func() { + metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { handler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w}) if err != nil { scope.err(err, w, req) @@ -280,23 +280,26 @@ func checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) err return nil } -// setListSelfLink sets the self link of a list to the base URL, then sets the self links -// on all child objects returned. Returns the number of items in the list. -func setListSelfLink(obj runtime.Object, ctx context.Context, req *http.Request, namer ScopeNamer) (int, error) { +// setObjectSelfLink sets the self link of an object as needed. +func setObjectSelfLink(ctx context.Context, obj runtime.Object, req *http.Request, namer ScopeNamer) error { if !meta.IsListType(obj) { - return 0, nil + requestInfo, ok := request.RequestInfoFrom(ctx) + if !ok { + return fmt.Errorf("missing requestInfo") + } + return setSelfLink(obj, requestInfo, namer) } uri, err := namer.GenerateListLink(req) if err != nil { - return 0, err + return err } if err := namer.SetSelfLink(obj, uri); err != nil { klog.V(4).Infof("Unable to set self link on object: %v", err) } requestInfo, ok := request.RequestInfoFrom(ctx) if !ok { - return 0, fmt.Errorf("missing requestInfo") + return fmt.Errorf("missing requestInfo") } count := 0 @@ -304,7 +307,14 @@ func setListSelfLink(obj runtime.Object, ctx context.Context, req *http.Request, count++ return setSelfLink(obj, requestInfo, namer) }) - return count, err + + if count == 0 { + if err := meta.SetList(obj, []runtime.Object{}); err != nil { + return err + } + } + + return err } func summarizeData(data []byte, maxLength int) string { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go index 1bcde7f28b4..ad2fc6ef197 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -64,6 +64,12 @@ func UpdateResource(r rest.Updater, scope RequestScope, admit admission.Interfac ctx := req.Context() ctx = request.WithNamespace(ctx, namespace) + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, &scope) + if err != nil { + scope.err(err, w, req) + return + } + body, err := readBody(req) if err != nil { scope.err(err, w, req) @@ -174,24 +180,13 @@ func UpdateResource(r rest.Updater, scope RequestScope, admit admission.Interfac } trace.Step("Object stored in database") - requestInfo, ok := request.RequestInfoFrom(ctx) - if !ok { - scope.err(fmt.Errorf("missing requestInfo"), w, req) - return - } - if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - trace.Step("Self-link added") - status := http.StatusOK if wasCreated { status = http.StatusCreated } scope.Trace = trace - transformResponseObject(ctx, scope, req, w, status, result) + transformResponseObject(ctx, scope, req, w, status, outputMediaType, result) } } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go b/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go index f387403dce0..89f1caa97d5 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go @@ -176,6 +176,8 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag return nil, err } + group, version := a.group.GroupVersion.Group, a.group.GroupVersion.Version + fqKindToRegister, err := GetResourceKind(a.group.GroupVersion, storage, a.group.Typer) if err != nil { return nil, err @@ -571,9 +573,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if needOverride { // need change the reported verb - handler = 
metrics.InstrumentRouteFunc(verbOverrider.OverrideMetricsVerb(action.Verb), resource, subresource, requestScope, handler) + handler = metrics.InstrumentRouteFunc(verbOverrider.OverrideMetricsVerb(action.Verb), group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler) } else { - handler = metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, handler) + handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler) } if a.enableAPIResponseCompression { @@ -607,7 +609,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if isSubresource { doc = "list " + subresource + " of objects of kind " + kind } - handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulListResource(lister, watcher, reqScope, false, a.minRequestTimeout)) + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, false, a.minRequestTimeout)) if a.enableAPIResponseCompression { handler = genericfilters.RestfulWithCompression(handler) } @@ -642,7 +644,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if isSubresource { doc = "replace " + subresource + " of the specified " + kind } - handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulUpdateResource(updater, reqScope, admit)) + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulUpdateResource(updater, reqScope, admit)) route := ws.PUT(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). 
@@ -669,7 +671,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag string(types.MergePatchType), string(types.StrategicMergePatchType), } - handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulPatchResource(patcher, reqScope, admit, supportedTypes)) + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulPatchResource(patcher, reqScope, admit, supportedTypes)) route := ws.PATCH(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). @@ -691,7 +693,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag } else { handler = restfulCreateResource(creater, reqScope, admit) } - handler = metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, handler) + handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler) article := getArticleForNoun(kind, " ") doc := "create" + article + kind if isSubresource { @@ -720,7 +722,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if isSubresource { doc = "delete " + subresource + " of" + article + kind } - handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulDeleteResource(gracefulDeleter, isGracefulDeleter, reqScope, admit)) + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulDeleteResource(gracefulDeleter, isGracefulDeleter, reqScope, admit)) route := ws.DELETE(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). 
@@ -743,7 +745,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if isSubresource { doc = "delete collection of " + subresource + " of a " + kind } - handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulDeleteCollection(collectionDeleter, isCollectionDeleter, reqScope, admit)) + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulDeleteCollection(collectionDeleter, isCollectionDeleter, reqScope, admit)) route := ws.DELETE(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). @@ -763,7 +765,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag doc = "watch changes to " + subresource + " of an object of kind " + kind } doc += ". deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter." - handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout)) + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout)) route := ws.GET(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). @@ -783,7 +785,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag doc = "watch individual changes to a list of " + subresource + " of " + kind } doc += ". deprecated: use the 'watch' parameter with a list operation instead." 
- handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout)) + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout)) route := ws.GET(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). @@ -806,7 +808,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if isSubresource { doc = "connect " + method + " requests to " + subresource + " of " + kind } - handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulConnectResource(connecter, reqScope, admit, path, isSubresource)) + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulConnectResource(connecter, reqScope, admit, path, isSubresource)) route := ws.Method(method).Path(action.Path). To(handler). Doc(doc). diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index ae16d43bb1e..27f416e6b5c 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -40,50 +40,54 @@ type resettableCollector interface { Reset() } +const ( + APIServerComponent string = "apiserver" +) + var ( // TODO(a-robinson): Add unit tests for the handling of these metrics once // the upstream library supports it. 
requestCounter = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "apiserver_request_count", - Help: "Counter of apiserver requests broken out for each verb, API resource, client, and HTTP response contentType and code.", + Help: "Counter of apiserver requests broken out for each verb, group, version, resource, scope, component, client, and HTTP response contentType and code.", }, - []string{"verb", "resource", "subresource", "scope", "client", "contentType", "code"}, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component", "client", "contentType", "code"}, ) longRunningRequestGauge = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "apiserver_longrunning_gauge", - Help: "Gauge of all active long-running apiserver requests broken out by verb, API resource, and scope. Not all requests are tracked this way.", + Help: "Gauge of all active long-running apiserver requests broken out by verb, group, version, resource, scope and component. Not all requests are tracked this way.", }, - []string{"verb", "resource", "subresource", "scope"}, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) requestLatencies = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "apiserver_request_latencies", - Help: "Response latency distribution in microseconds for each verb, resource and subresource.", + Help: "Response latency distribution in microseconds for each verb, group, version, resource, subresource, scope and component.", // Use buckets ranging from 125 ms to 8 seconds. 
Buckets: prometheus.ExponentialBuckets(125000, 2.0, 7), }, - []string{"verb", "resource", "subresource", "scope"}, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) requestLatenciesSummary = prometheus.NewSummaryVec( prometheus.SummaryOpts{ Name: "apiserver_request_latencies_summary", - Help: "Response latency summary in microseconds for each verb, resource and subresource.", + Help: "Response latency summary in microseconds for each verb, group, version, resource, subresource, scope and component.", // Make the sliding window of 5h. // TODO: The value for this should be based on our SLI definition (medium term). MaxAge: 5 * time.Hour, }, - []string{"verb", "resource", "subresource", "scope"}, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) responseSizes = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "apiserver_response_sizes", - Help: "Response size distribution in bytes for each verb, resource, subresource and scope (namespace/cluster).", + Help: "Response size distribution in bytes for each group, version, verb, resource, subresource, scope and component.", // Use buckets ranging from 1000 bytes (1KB) to 10^9 bytes (1GB). Buckets: prometheus.ExponentialBuckets(1000, 10.0, 7), }, - []string{"verb", "resource", "subresource", "scope"}, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) // DroppedRequests is a number of requests dropped with 'Try again later' response" DroppedRequests = prometheus.NewCounterVec( @@ -157,21 +161,21 @@ func UpdateInflightRequestMetrics(nonmutating, mutating int) { // Record records a single request to the standard metrics endpoints. For use by handlers that perform their own // processing. All API paths should use InstrumentRouteFunc implicitly. Use this instead of MonitorRequest if // you already have a RequestInfo object. 
-func Record(req *http.Request, requestInfo *request.RequestInfo, contentType string, code int, responseSizeInBytes int, elapsed time.Duration) { +func Record(req *http.Request, requestInfo *request.RequestInfo, component, contentType string, code int, responseSizeInBytes int, elapsed time.Duration) { if requestInfo == nil { requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} } scope := CleanScope(requestInfo) if requestInfo.IsResourceRequest { - MonitorRequest(req, strings.ToUpper(requestInfo.Verb), requestInfo.Resource, requestInfo.Subresource, scope, contentType, code, responseSizeInBytes, elapsed) + MonitorRequest(req, strings.ToUpper(requestInfo.Verb), requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, contentType, code, responseSizeInBytes, elapsed) } else { - MonitorRequest(req, strings.ToUpper(requestInfo.Verb), "", requestInfo.Path, scope, contentType, code, responseSizeInBytes, elapsed) + MonitorRequest(req, strings.ToUpper(requestInfo.Verb), "", "", "", requestInfo.Path, scope, component, contentType, code, responseSizeInBytes, elapsed) } } // RecordLongRunning tracks the execution of a long running request against the API server. It provides an accurate count // of the total number of open long running requests. requestInfo may be nil if the caller is not in the normal request flow. 
-func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, fn func()) { +func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, component string, fn func()) { if requestInfo == nil { requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} } @@ -179,9 +183,9 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, fn f scope := CleanScope(requestInfo) reportedVerb := cleanVerb(strings.ToUpper(requestInfo.Verb), req) if requestInfo.IsResourceRequest { - g = longRunningRequestGauge.WithLabelValues(reportedVerb, requestInfo.Resource, requestInfo.Subresource, scope) + g = longRunningRequestGauge.WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component) } else { - g = longRunningRequestGauge.WithLabelValues(reportedVerb, "", requestInfo.Path, scope) + g = longRunningRequestGauge.WithLabelValues(reportedVerb, "", "", "", requestInfo.Path, scope, component) } g.Inc() defer g.Dec() @@ -190,22 +194,22 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, fn f // MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record // a request. verb must be uppercase to be backwards compatible with existing monitoring tooling. 
-func MonitorRequest(req *http.Request, verb, resource, subresource, scope, contentType string, httpCode, respSize int, elapsed time.Duration) { +func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component, contentType string, httpCode, respSize int, elapsed time.Duration) { reportedVerb := cleanVerb(verb, req) client := cleanUserAgent(utilnet.GetHTTPClient(req)) elapsedMicroseconds := float64(elapsed / time.Microsecond) - requestCounter.WithLabelValues(reportedVerb, resource, subresource, scope, client, contentType, codeToString(httpCode)).Inc() - requestLatencies.WithLabelValues(reportedVerb, resource, subresource, scope).Observe(elapsedMicroseconds) - requestLatenciesSummary.WithLabelValues(reportedVerb, resource, subresource, scope).Observe(elapsedMicroseconds) + requestCounter.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component, client, contentType, codeToString(httpCode)).Inc() + requestLatencies.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(elapsedMicroseconds) + requestLatenciesSummary.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(elapsedMicroseconds) // We are only interested in response sizes of read requests. if verb == "GET" || verb == "LIST" { - responseSizes.WithLabelValues(reportedVerb, resource, subresource, scope).Observe(float64(respSize)) + responseSizes.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(float64(respSize)) } } // InstrumentRouteFunc works like Prometheus' InstrumentHandlerFunc but wraps // the go-restful RouteFunction instead of a HandlerFunc plus some Kubernetes endpoint specific information. 
-func InstrumentRouteFunc(verb, resource, subresource, scope string, routeFunc restful.RouteFunction) restful.RouteFunction { +func InstrumentRouteFunc(verb, group, version, resource, subresource, scope, component string, routeFunc restful.RouteFunction) restful.RouteFunction { return restful.RouteFunction(func(request *restful.Request, response *restful.Response) { now := time.Now() @@ -224,12 +228,12 @@ func InstrumentRouteFunc(verb, resource, subresource, scope string, routeFunc re routeFunc(request, response) - MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now)) + MonitorRequest(request.Request, verb, group, version, resource, subresource, scope, component, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now)) }) } // InstrumentHandlerFunc works like Prometheus' InstrumentHandlerFunc but adds some Kubernetes endpoint specific information. 
-func InstrumentHandlerFunc(verb, resource, subresource, scope string, handler http.HandlerFunc) http.HandlerFunc { +func InstrumentHandlerFunc(verb, group, version, resource, subresource, scope, component string, handler http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { now := time.Now() @@ -246,7 +250,7 @@ func InstrumentHandlerFunc(verb, resource, subresource, scope string, handler ht handler(w, req) - MonitorRequest(req, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now)) + MonitorRequest(req, verb, group, version, resource, subresource, scope, component, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now)) } } diff --git a/staging/src/k8s.io/apiserver/pkg/features/kube_features.go b/staging/src/k8s.io/apiserver/pkg/features/kube_features.go index 92418256814..88d949c4df0 100644 --- a/staging/src/k8s.io/apiserver/pkg/features/kube_features.go +++ b/staging/src/k8s.io/apiserver/pkg/features/kube_features.go @@ -91,7 +91,7 @@ const ( ) func init() { - utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates) + utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates) } // defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. 
diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go index 8818cb5633f..cc10d0abd8f 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -176,7 +176,7 @@ func WithMaxInFlightLimit( } } } - metrics.Record(r, requestInfo, "", http.StatusTooManyRequests, 0, 0) + metrics.Record(r, requestInfo, metrics.APIServerComponent, "", http.StatusTooManyRequests, 0, 0) tooManyRequests(r, w) } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go b/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go index adb179f8235..56cd8a4ce05 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -58,7 +58,7 @@ func WithTimeoutForNonLongRunningRequests(handler http.Handler, longRunning apir postTimeoutFn := func() { cancel() - metrics.Record(req, requestInfo, "", http.StatusGatewayTimeout, 0, 0) + metrics.Record(req, requestInfo, metrics.APIServerComponent, "", http.StatusGatewayTimeout, 0, 0) } return req, time.After(timeout), postTimeoutFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/BUILD b/staging/src/k8s.io/apiserver/pkg/server/options/BUILD index 87318a1f053..79c96a13942 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/options/BUILD @@ -123,6 +123,7 @@ go_test( "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go 
b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go index 04331794e54..877dc9133a7 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go @@ -117,7 +117,12 @@ type DelegatingAuthenticationOptions struct { ClientCert ClientCertAuthenticationOptions RequestHeader RequestHeaderAuthenticationOptions + // SkipInClusterLookup indicates missing authentication configuration should not be retrieved from the cluster configmap SkipInClusterLookup bool + + // TolerateInClusterLookupFailure indicates failures to look up authentication configuration from the cluster configmap should not be fatal. + // Setting this can result in an authenticator that will reject all requests. + TolerateInClusterLookupFailure bool } func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions { @@ -160,6 +165,9 @@ func (s *DelegatingAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { fs.BoolVar(&s.SkipInClusterLookup, "authentication-skip-lookup", s.SkipInClusterLookup, ""+ "If false, the authentication-kubeconfig will be used to lookup missing authentication "+ "configuration from the cluster.") + fs.BoolVar(&s.TolerateInClusterLookupFailure, "authentication-tolerate-lookup-failure", s.TolerateInClusterLookupFailure, ""+ + "If true, failures to look up missing authentication configuration from the cluster are not considered fatal. 
"+ + "Note that this can result in authentication that treats all requests as anonymous.") } func (s *DelegatingAuthenticationOptions) ApplyTo(c *server.AuthenticationInfo, servingInfo *server.SecureServingInfo, openAPIConfig *openapicommon.Config) error { @@ -187,7 +195,13 @@ func (s *DelegatingAuthenticationOptions) ApplyTo(c *server.AuthenticationInfo, if !s.SkipInClusterLookup { err := s.lookupMissingConfigInCluster(client) if err != nil { - return err + if s.TolerateInClusterLookupFailure { + klog.Warningf("Error looking up in-cluster authentication configuration: %v", err) + klog.Warningf("Continuing without authentication configuration. This may treat all requests as anonymous.") + klog.Warningf("To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false") + } else { + return err + } } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authentication_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/authentication_test.go index 3b2a581f2cc..a196b1c76da 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authentication_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authentication_test.go @@ -17,10 +17,15 @@ limitations under the License. 
package options import ( + "io/ioutil" + "net/http" + "os" "reflect" "testing" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" + "k8s.io/apiserver/pkg/server" + openapicommon "k8s.io/kube-openapi/pkg/common" ) func TestToAuthenticationRequestHeaderConfig(t *testing.T) { @@ -66,3 +71,131 @@ func TestToAuthenticationRequestHeaderConfig(t *testing.T) { }) } } + +func TestApplyToFallback(t *testing.T) { + + f, err := ioutil.TempFile("", "authkubeconfig") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + + if err := ioutil.WriteFile(f.Name(), []byte(` +apiVersion: v1 +kind: Config +clusters: +- cluster: + server: http://localhost:56789 + name: cluster +contexts: +- context: + cluster: cluster + name: cluster +current-context: cluster +`), os.FileMode(0755)); err != nil { + t.Fatal(err) + } + remoteKubeconfig := f.Name() + + testcases := []struct { + name string + options *DelegatingAuthenticationOptions + expectError bool + expectAuthenticator bool + expectTokenAnonymous bool + expectTokenErrors bool + }{ + { + name: "empty", + options: nil, + expectError: false, + expectAuthenticator: false, + }, + { + name: "default", + options: NewDelegatingAuthenticationOptions(), + expectError: true, // in-cluster client building fails, no kubeconfig provided + expectAuthenticator: false, + }, + { + name: "optional kubeconfig", + options: func() *DelegatingAuthenticationOptions { + opts := NewDelegatingAuthenticationOptions() + opts.RemoteKubeConfigFileOptional = true + return opts + }(), + expectError: false, // in-cluster client building fails, no kubeconfig required + expectAuthenticator: true, + expectTokenAnonymous: true, // no token validator available + }, + { + name: "valid client, failed cluster info lookup", + options: func() *DelegatingAuthenticationOptions { + opts := NewDelegatingAuthenticationOptions() + opts.RemoteKubeConfigFile = remoteKubeconfig + return opts + }(), + expectError: true, // client building is valid, remote config lookup 
fails + expectAuthenticator: false, + }, + { + name: "valid client, skip cluster info lookup", + options: func() *DelegatingAuthenticationOptions { + opts := NewDelegatingAuthenticationOptions() + opts.RemoteKubeConfigFile = remoteKubeconfig + opts.SkipInClusterLookup = true + return opts + }(), + expectError: false, // client building is valid, skipped cluster lookup + expectAuthenticator: true, + expectTokenErrors: true, // client fails making tokenreview calls + }, + { + name: "valid client, tolerate failed cluster info lookup", + options: func() *DelegatingAuthenticationOptions { + opts := NewDelegatingAuthenticationOptions() + opts.RemoteKubeConfigFile = remoteKubeconfig + opts.TolerateInClusterLookupFailure = true + return opts + }(), + expectError: false, // client is valid, skipped cluster lookup + expectAuthenticator: true, // anonymous auth + expectTokenErrors: true, // client fails making tokenreview calls + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + c := &server.AuthenticationInfo{} + servingInfo := &server.SecureServingInfo{} + openAPIConfig := &openapicommon.Config{} + + err := tc.options.ApplyTo(c, servingInfo, openAPIConfig) + if (err != nil) != tc.expectError { + t.Errorf("expected error=%v, got %v", tc.expectError, err) + } + if (c.Authenticator != nil) != tc.expectAuthenticator { + t.Errorf("expected authenticator=%v, got %#v", tc.expectError, c.Authenticator) + } + if c.Authenticator != nil { + { + result, ok, err := c.Authenticator.AuthenticateRequest(&http.Request{}) + if err != nil || !ok || result == nil || result.User.GetName() != "system:anonymous" { + t.Errorf("expected anonymous, got %#v, %#v, %#v", result, ok, err) + } + } + { + result, ok, err := c.Authenticator.AuthenticateRequest(&http.Request{Header: http.Header{"Authorization": []string{"Bearer foo"}}}) + if tc.expectTokenAnonymous { + if err != nil || !ok || result == nil || result.User.GetName() != "system:anonymous" { + t.Errorf("expected 
anonymous, got %#v, %#v, %#v", result, ok, err) + } + } + if tc.expectTokenErrors != (err != nil) { + t.Errorf("expected error=%v, got %#v, %#v, %#v", tc.expectTokenErrors, result, ok, err) + } + } + } + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go b/staging/src/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go index f1cc4430b8b..804f04bca96 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go @@ -85,11 +85,13 @@ func (s *DeprecatedInsecureServingOptions) AddUnqualifiedFlags(fs *pflag.FlagSet } fs.IPVar(&s.BindAddress, "address", s.BindAddress, - "DEPRECATED: see --bind-address instead.") + "The IP address on which to serve the insecure --port (set to 0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces).") fs.MarkDeprecated("address", "see --bind-address instead.") + fs.Lookup("address").Hidden = false - fs.IntVar(&s.BindPort, "port", s.BindPort, "DEPRECATED: see --secure-port instead.") + fs.IntVar(&s.BindPort, "port", s.BindPort, "The port on which to serve unsecured, unauthenticated access. Set to 0 to disable.") fs.MarkDeprecated("port", "see --secure-port instead.") + fs.Lookup("port").Hidden = false } // ApplyTo adds DeprecatedInsecureServingOptions to the insecureserverinfo amd kube-controller manager configuration. diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index 7c2a08ba0ae..26173eb72c4 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -134,9 +134,10 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { "Default watch cache size. 
If zero, watch cache will be disabled for resources that do not have a default watch size set.") fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ - "List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. "+ - "The individual override format: resource[.group]#size, where resource is lowercase plural (no version), "+ - "group is optional, and size is a number. It takes effect when watch-cache is enabled. "+ + "Watch cache size settings for some resources (pods, nodes, etc.), comma separated. "+ + "The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), "+ + "group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, "+ + "and size is a number. It takes effect when watch-cache is enabled. "+ "Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) "+ "have system defaults set by heuristics, others default to default-watch-cache-size") @@ -164,6 +165,10 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.EncryptionProviderConfigFilepath, "experimental-encryption-provider-config", s.EncryptionProviderConfigFilepath, "The file containing configuration for encryption providers to be used for storing secrets in etcd") + fs.MarkDeprecated("experimental-encryption-provider-config", "use --encryption-provider-config.") + + fs.StringVar(&s.EncryptionProviderConfigFilepath, "encryption-provider-config", s.EncryptionProviderConfigFilepath, + "The file containing configuration for encryption providers to be used for storing secrets in etcd") fs.DurationVar(&s.StorageConfig.CompactionInterval, "etcd-compaction-interval", s.StorageConfig.CompactionInterval, "The interval of compaction requests. 
If 0, the compaction request from apiserver is disabled.") diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go b/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go index fccb24e03ad..2884f853ff6 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -154,5 +154,5 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "handler, which picks a randomized value above this number as the connection timeout, "+ "to spread out load.") - utilfeature.DefaultFeatureGate.AddFlag(fs) + utilfeature.DefaultMutableFeatureGate.AddFlag(fs) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go b/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go index 08006c96550..663c33bc4ed 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go +++ b/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go @@ -165,12 +165,12 @@ func RunServer( type NamedTLSCert struct { TLSCert tls.Certificate - // names is a list of domain patterns: fully qualified domain names, possibly prefixed with + // Names is a list of domain patterns: fully qualified domain names, possibly prefixed with // wildcard segments. Names []string } -// getNamedCertificateMap returns a map of *tls.Certificate by name. It's is +// GetNamedCertificateMap returns a map of *tls.Certificate by name. It's // suitable for use in tls.Config#NamedCertificates. Returns an error if any of the certs // cannot be loaded. 
Returns nil if len(certs) == 0 func GetNamedCertificateMap(certs []NamedTLSCert) (map[string]*tls.Certificate, error) { diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD index 1b177714b27..9ca061e8cac 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD @@ -47,6 +47,7 @@ go_library( "errors.go", "event.go", "lease_manager.go", + "logger.go", "store.go", "watcher.go", ], diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/logger.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/logger.go new file mode 100644 index 00000000000..a117db6fe26 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/logger.go @@ -0,0 +1,84 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "fmt" + + "github.com/coreos/etcd/clientv3" + "k8s.io/klog" +) + +func init() { + clientv3.SetLogger(klogWrapper{}) +} + +type klogWrapper struct{} + +const klogWrapperDepth = 4 + +func (klogWrapper) Info(args ...interface{}) { + klog.InfoDepth(klogWrapperDepth, args...) 
+} + +func (klogWrapper) Infoln(args ...interface{}) { + klog.InfoDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Infof(format string, args ...interface{}) { + klog.InfoDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) Warning(args ...interface{}) { + klog.WarningDepth(klogWrapperDepth, args...) +} + +func (klogWrapper) Warningln(args ...interface{}) { + klog.WarningDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Warningf(format string, args ...interface{}) { + klog.WarningDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) Error(args ...interface{}) { + klog.ErrorDepth(klogWrapperDepth, args...) +} + +func (klogWrapper) Errorln(args ...interface{}) { + klog.ErrorDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Errorf(format string, args ...interface{}) { + klog.ErrorDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) Fatal(args ...interface{}) { + klog.FatalDepth(klogWrapperDepth, args...) +} + +func (klogWrapper) Fatalln(args ...interface{}) { + klog.FatalDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Fatalf(format string, args ...interface{}) { + klog.FatalDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) V(l int) bool { + return bool(klog.V(klog.Level(l))) +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto index dbde9080e70..b6c2f31c7ea 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto @@ -1,3 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // To regenerate service.pb.go run hack/update-generated-kms.sh syntax = "proto3"; diff --git a/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go index a83dafd56ab..a8bce27f3a7 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go +++ b/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -51,8 +51,15 @@ var ( allAlphaGate: setUnsetAlphaGates, } + // DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate. + // Only top-level commands/options setup and the k8s.io/apiserver/pkg/util/feature/testing package should make use of this. + // Tests that need to modify feature gates for the duration of their test should use: + // defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., )() + DefaultMutableFeatureGate MutableFeatureGate = NewFeatureGate() + // DefaultFeatureGate is a shared global FeatureGate. - DefaultFeatureGate FeatureGate = NewFeatureGate() + // Top-level commands/options setup that needs to modify this feature gate should use DefaultMutableFeatureGate. + DefaultFeatureGate FeatureGate = DefaultMutableFeatureGate ) type FeatureSpec struct { @@ -72,9 +79,23 @@ const ( Deprecated = prerelease("DEPRECATED") ) -// FeatureGate parses and stores flag gates for known features from -// a string like feature1=true,feature2=false,... +// FeatureGate indicates whether a given feature is enabled or not type FeatureGate interface { + // Enabled returns true if the key is enabled. 
+ Enabled(key Feature) bool + // KnownFeatures returns a slice of strings describing the FeatureGate's known features. + KnownFeatures() []string + // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be + // set on the copy without mutating the original. This is useful for validating + // config against potential feature gate changes before committing those changes. + DeepCopy() MutableFeatureGate +} + +// MutableFeatureGate parses and stores flag gates for known features from +// a string like feature1=true,feature2=false,... +type MutableFeatureGate interface { + FeatureGate + // AddFlag adds a flag for setting global feature gates to the specified FlagSet. AddFlag(fs *pflag.FlagSet) // Set parses and stores flag gates for known features @@ -82,16 +103,8 @@ type FeatureGate interface { Set(value string) error // SetFromMap stores flag gates for known features from a map[string]bool or returns an error SetFromMap(m map[string]bool) error - // Enabled returns true if the key is enabled. - Enabled(key Feature) bool // Add adds features to the featureGate. Add(features map[Feature]FeatureSpec) error - // KnownFeatures returns a slice of strings describing the FeatureGate's known features. - KnownFeatures() []string - // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be - // set on the copy without mutating the original. This is useful for validating - // config against potential feature gate changes before committing those changes. - DeepCopy() FeatureGate } // featureGate implements FeatureGate as well as pflag.Value for flag parsing. @@ -294,7 +307,7 @@ func (f *featureGate) KnownFeatures() []string { // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be // set on the copy without mutating the original. This is useful for validating // config against potential feature gate changes before committing those changes. 
-func (f *featureGate) DeepCopy() FeatureGate { +func (f *featureGate) DeepCopy() MutableFeatureGate { // Copy existing state. known := map[Feature]FeatureSpec{} for k, v := range f.known.Load().(map[Feature]FeatureSpec) { diff --git a/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate_test.go b/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate_test.go index 14ec8694816..194ed1f0723 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate_test.go +++ b/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate_test.go @@ -148,7 +148,7 @@ func TestFeatureGateOverride(t *testing.T) { const testBetaGate Feature = "TestBeta" // Don't parse the flag, assert defaults are used. - var f FeatureGate = NewFeatureGate() + var f *featureGate = NewFeatureGate() f.Add(map[Feature]FeatureSpec{ testAlphaGate: {Default: false, PreRelease: Alpha}, testBetaGate: {Default: false, PreRelease: Beta}, @@ -177,7 +177,7 @@ func TestFeatureGateFlagDefaults(t *testing.T) { const testBetaGate Feature = "TestBeta" // Don't parse the flag, assert defaults are used. - var f FeatureGate = NewFeatureGate() + var f *featureGate = NewFeatureGate() f.Add(map[Feature]FeatureSpec{ testAlphaGate: {Default: false, PreRelease: Alpha}, testBetaGate: {Default: true, PreRelease: Beta}, @@ -201,7 +201,7 @@ func TestFeatureGateKnownFeatures(t *testing.T) { ) // Don't parse the flag, assert defaults are used. 
- var f FeatureGate = NewFeatureGate() + var f *featureGate = NewFeatureGate() f.Add(map[Feature]FeatureSpec{ testAlphaGate: {Default: false, PreRelease: Alpha}, testBetaGate: {Default: true, PreRelease: Beta}, diff --git a/staging/src/k8s.io/apiserver/pkg/util/feature/testing/feature_gate_testing.go b/staging/src/k8s.io/apiserver/pkg/util/feature/testing/feature_gate_testing.go index bcae2566bc3..6b0a5fe8d3c 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/feature/testing/feature_gate_testing.go +++ b/staging/src/k8s.io/apiserver/pkg/util/feature/testing/feature_gate_testing.go @@ -18,67 +18,27 @@ package testing import ( "fmt" - "os" - "strings" "testing" "k8s.io/apiserver/pkg/util/feature" ) -// VerifyFeatureGatesUnchanged ensures the provided gate does not change any values when tests() are completed. -// Intended to be placed into unit test packages that mess with feature gates. -// -// Example use: -// -// import ( -// "testing" -// -// utilfeature "k8s.io/apiserver/pkg/util/feature" -// utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" -// _ "k8s.io/kubernetes/pkg/features" -// ) -// -// func TestMain(m *testing.M) { -// utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run) -// } -func VerifyFeatureGatesUnchanged(gate feature.FeatureGate, tests func() int) { - originalGates := gate.DeepCopy() - originalSet := fmt.Sprint(gate) - - rc := tests() - - finalSet := fmt.Sprint(gate) - if finalSet != originalSet { - for _, kv := range strings.Split(finalSet, ",") { - k := strings.Split(kv, "=")[0] - if originalGates.Enabled(feature.Feature(k)) != gate.Enabled(feature.Feature(k)) { - fmt.Println(fmt.Sprintf("VerifyFeatureGatesUnchanged: mutated %s feature gate from %v to %v", k, originalGates.Enabled(feature.Feature(k)), gate.Enabled(feature.Feature(k)))) - rc = 1 - } - } - } - - if rc != 0 { - os.Exit(rc) - } -} - // SetFeatureGateDuringTest sets the specified gate to the specified value, and returns a function that 
restores the original value. // Failures to set or restore cause the test to fail. // // Example use: // // defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., true)() -func SetFeatureGateDuringTest(t *testing.T, gate feature.FeatureGate, feature feature.Feature, value bool) func() { - originalValue := gate.Enabled(feature) +func SetFeatureGateDuringTest(t *testing.T, gate feature.FeatureGate, f feature.Feature, value bool) func() { + originalValue := gate.Enabled(f) - if err := gate.Set(fmt.Sprintf("%s=%v", feature, value)); err != nil { - t.Errorf("error setting %s=%v: %v", feature, value, err) + if err := gate.(feature.MutableFeatureGate).Set(fmt.Sprintf("%s=%v", f, value)); err != nil { + t.Errorf("error setting %s=%v: %v", f, value, err) } return func() { - if err := gate.Set(fmt.Sprintf("%s=%v", feature, originalValue)); err != nil { - t.Errorf("error restoring %s=%v: %v", feature, originalValue, err) + if err := gate.(feature.MutableFeatureGate).Set(fmt.Sprintf("%s=%v", f, originalValue)); err != nil { + t.Errorf("error restoring %s=%v: %v", f, originalValue, err) } } } diff --git a/staging/src/k8s.io/apiserver/pkg/util/globalflag/globalflags.go b/staging/src/k8s.io/apiserver/pkg/util/globalflag/globalflags.go index c00ef7b9f1e..b49c8e9dc8f 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/globalflag/globalflags.go +++ b/staging/src/k8s.io/apiserver/pkg/util/globalflag/globalflags.go @@ -19,7 +19,6 @@ package globalflag import ( "flag" "fmt" - "os" "strings" "github.com/spf13/pflag" @@ -28,7 +27,7 @@ import ( ) // AddGlobalFlags explicitly registers flags that libraries (klog, verflag, etc.) register -// against the global flagsets from "flag" and "github.com/spf13/pflag". +// against the global flagsets from "flag" and "k8s.io/klog". // We do this in order to prevent unwanted flags from leaking into the component's flagset. 
func AddGlobalFlags(fs *pflag.FlagSet, name string) { addGlogFlags(fs) @@ -39,21 +38,16 @@ func AddGlobalFlags(fs *pflag.FlagSet, name string) { // addGlogFlags explicitly registers flags that klog libraries(k8s.io/klog) register. func addGlogFlags(fs *pflag.FlagSet) { - // lookup flags in global flag set and re-register the values with our flagset - global := flag.CommandLine - local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) - - register(global, local, "logtostderr") - register(global, local, "alsologtostderr") - register(global, local, "v") - register(global, local, "skip_headers") - register(global, local, "stderrthreshold") - register(global, local, "vmodule") - register(global, local, "log_backtrace_at") - register(global, local, "log_dir") - register(global, local, "log_file") - - fs.AddFlagSet(local) + // lookup flags of klog libraries in global flag set and re-register the values with our flagset + Register(fs, "logtostderr") + Register(fs, "alsologtostderr") + Register(fs, "v") + Register(fs, "skip_headers") + Register(fs, "stderrthreshold") + Register(fs, "vmodule") + Register(fs, "log_backtrace_at") + Register(fs, "log_dir") + Register(fs, "log_file") } // normalize replaces underscores with hyphens @@ -62,9 +56,9 @@ func normalize(s string) string { return strings.Replace(s, "_", "-", -1) } -// register adds a flag to local that targets the Value associated with the Flag named globalName in global -func register(global *flag.FlagSet, local *pflag.FlagSet, globalName string) { - if f := global.Lookup(globalName); f != nil { +// Register adds a flag to local that targets the Value associated with the Flag named globalName in flag.CommandLine. 
+func Register(local *pflag.FlagSet, globalName string) { + if f := flag.CommandLine.Lookup(globalName); f != nil { pflagFlag := pflag.PFlagFromGoFlag(f) pflagFlag.Name = normalize(pflagFlag.Name) local.AddFlag(pflagFlag) diff --git a/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go b/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go index 1d1c0ad3bc9..dd0f4e5e663 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -177,6 +177,7 @@ func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Conf return nil, err } config.BearerToken = string(tokenBytes) + config.BearerTokenFile = configAuthInfo.TokenFile } if len(configAuthInfo.Impersonate) > 0 { config.Impersonate = rest.ImpersonationConfig{ diff --git a/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json b/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json index aa88de45f6e..6c669d3848a 100644 --- a/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json +++ b/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json @@ -80,7 +80,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/modern-go/concurrent", diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index 7fbd65b5b5c..7d0d0b0b8aa 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -160,7 +160,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/modern-go/concurrent", diff --git a/staging/src/k8s.io/client-go/rest/BUILD b/staging/src/k8s.io/client-go/rest/BUILD index 70920303e4e..9f00aac950e 100644 --- a/staging/src/k8s.io/client-go/rest/BUILD 
+++ b/staging/src/k8s.io/client-go/rest/BUILD @@ -13,7 +13,6 @@ go_test( "config_test.go", "plugin_test.go", "request_test.go", - "token_source_test.go", "url_utils_test.go", "urlbackoff_test.go", ], @@ -41,7 +40,6 @@ go_test( "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", - "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -53,7 +51,6 @@ go_library( "config.go", "plugin.go", "request.go", - "token_source.go", "transport.go", "url_utils.go", "urlbackoff.go", @@ -80,7 +77,6 @@ go_library( "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", - "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/rest/config.go b/staging/src/k8s.io/client-go/rest/config.go index 438eb3bedac..072e7392b19 100644 --- a/staging/src/k8s.io/client-go/rest/config.go +++ b/staging/src/k8s.io/client-go/rest/config.go @@ -70,6 +70,11 @@ type Config struct { // TODO: demonstrate an OAuth2 compatible client. BearerToken string + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. + BearerTokenFile string + // Impersonate is the configuration that RESTClient will use for impersonation. Impersonate ImpersonationConfig @@ -322,9 +327,8 @@ func InClusterConfig() (*Config, error) { return nil, ErrNotInCluster } - ts := NewCachedFileTokenSource(tokenFile) - - if _, err := ts.Token(); err != nil { + token, err := ioutil.ReadFile(tokenFile) + if err != nil { return nil, err } @@ -340,7 +344,8 @@ func InClusterConfig() (*Config, error) { // TODO: switch to using cluster DNS. 
Host: "https://" + net.JoinHostPort(host, port), TLSClientConfig: tlsClientConfig, - WrapTransport: TokenSourceWrapTransport(ts), + BearerToken: string(token), + BearerTokenFile: tokenFile, }, nil } @@ -430,12 +435,13 @@ func AnonymousClientConfig(config *Config) *Config { // CopyConfig returns a copy of the given config func CopyConfig(config *Config) *Config { return &Config{ - Host: config.Host, - APIPath: config.APIPath, - ContentConfig: config.ContentConfig, - Username: config.Username, - Password: config.Password, - BearerToken: config.BearerToken, + Host: config.Host, + APIPath: config.APIPath, + ContentConfig: config.ContentConfig, + Username: config.Username, + Password: config.Password, + BearerToken: config.BearerToken, + BearerTokenFile: config.BearerTokenFile, Impersonate: ImpersonationConfig{ Groups: config.Impersonate.Groups, Extra: config.Impersonate.Extra, diff --git a/staging/src/k8s.io/client-go/rest/config_test.go b/staging/src/k8s.io/client-go/rest/config_test.go index 34786428548..22c18d77ed4 100644 --- a/staging/src/k8s.io/client-go/rest/config_test.go +++ b/staging/src/k8s.io/client-go/rest/config_test.go @@ -264,6 +264,7 @@ func TestAnonymousConfig(t *testing.T) { // is added to Config, update AnonymousClientConfig to preserve the field otherwise. 
expected.Impersonate = ImpersonationConfig{} expected.BearerToken = "" + expected.BearerTokenFile = "" expected.Username = "" expected.Password = "" expected.AuthProvider = nil diff --git a/staging/src/k8s.io/client-go/rest/request.go b/staging/src/k8s.io/client-go/rest/request.go index 64901fba20d..9609f01a504 100644 --- a/staging/src/k8s.io/client-go/rest/request.go +++ b/staging/src/k8s.io/client-go/rest/request.go @@ -1195,7 +1195,6 @@ func IsValidPathSegmentPrefix(name string) []string { func ValidatePathSegmentName(name string, prefix bool) []string { if prefix { return IsValidPathSegmentPrefix(name) - } else { - return IsValidPathSegmentName(name) } + return IsValidPathSegmentName(name) } diff --git a/staging/src/k8s.io/client-go/rest/request_test.go b/staging/src/k8s.io/client-go/rest/request_test.go index a415f60ae79..e70770dcd6e 100755 --- a/staging/src/k8s.io/client-go/rest/request_test.go +++ b/staging/src/k8s.io/client-go/rest/request_test.go @@ -1218,10 +1218,9 @@ func TestBackoffLifecycle(t *testing.T) { if count == 5 || count == 9 { w.WriteHeader(http.StatusOK) return - } else { - w.WriteHeader(http.StatusGatewayTimeout) - return } + w.WriteHeader(http.StatusGatewayTimeout) + return })) defer testServer.Close() c := testRESTClient(t, testServer) diff --git a/staging/src/k8s.io/client-go/testing/fixture.go b/staging/src/k8s.io/client-go/testing/fixture.go index 90f16f56080..b468b328c1a 100644 --- a/staging/src/k8s.io/client-go/testing/fixture.go +++ b/staging/src/k8s.io/client-go/testing/fixture.go @@ -339,8 +339,10 @@ func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watc if w := t.watchers[gvr][ns]; w != nil { watches = append(watches, w...) } - if w := t.watchers[gvr][""]; w != nil { - watches = append(watches, w...) + if ns != metav1.NamespaceAll { + if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil { + watches = append(watches, w...) 
+ } } } return watches diff --git a/staging/src/k8s.io/client-go/testing/fixture_test.go b/staging/src/k8s.io/client-go/testing/fixture_test.go index 405fe1a7e23..7b01c12a9c5 100644 --- a/staging/src/k8s.io/client-go/testing/fixture_test.go +++ b/staging/src/k8s.io/client-go/testing/fixture_test.go @@ -131,26 +131,47 @@ func TestWatchCallMultipleInvocation(t *testing.T) { cases := []struct { name string op watch.EventType + ns string }{ { "foo", watch.Added, + "test_namespace", }, { "bar", watch.Added, + "test_namespace", + }, + { + "baz", + watch.Added, + "", }, { "bar", watch.Modified, + "test_namespace", + }, + { + "baz", + watch.Modified, + "", }, { "foo", watch.Deleted, + "test_namespace", }, { "bar", watch.Deleted, + "test_namespace", + }, + { + "baz", + watch.Deleted, + "", }, } @@ -169,6 +190,7 @@ func TestWatchCallMultipleInvocation(t *testing.T) { wg.Add(len(watchNamespaces)) for idx, watchNamespace := range watchNamespaces { i := idx + watchNamespace := watchNamespace w, err := o.Watch(testResource, watchNamespace) if err != nil { t.Fatalf("test resource watch failed in %s: %v", watchNamespace, err) @@ -176,14 +198,17 @@ func TestWatchCallMultipleInvocation(t *testing.T) { go func() { assert.NoError(t, err, "watch invocation failed") for _, c := range cases { - fmt.Printf("%#v %#v\n", c, i) - event := <-w.ResultChan() - accessor, err := meta.Accessor(event.Object) - if err != nil { - t.Fatalf("unexpected error: %v", err) + if watchNamespace == "" || c.ns == watchNamespace { + fmt.Printf("%#v %#v\n", c, i) + event := <-w.ResultChan() + accessor, err := meta.Accessor(event.Object) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + assert.Equal(t, c.op, event.Type, "watch event mismatched") + assert.Equal(t, c.name, accessor.GetName(), "watched object mismatch") + assert.Equal(t, c.ns, accessor.GetNamespace(), "watched object mismatch") } - assert.Equal(t, c.op, event.Type, "watch event mismatched") - assert.Equal(t, c.name, accessor.GetName(), 
"watched object mismatch") } wg.Done() }() @@ -191,13 +216,13 @@ func TestWatchCallMultipleInvocation(t *testing.T) { for _, c := range cases { switch c.op { case watch.Added: - obj := getArbitraryResource(testResource, c.name, "test_namespace") - o.Create(testResource, obj, "test_namespace") + obj := getArbitraryResource(testResource, c.name, c.ns) + o.Create(testResource, obj, c.ns) case watch.Modified: - obj := getArbitraryResource(testResource, c.name, "test_namespace") - o.Update(testResource, obj, "test_namespace") + obj := getArbitraryResource(testResource, c.name, c.ns) + o.Update(testResource, obj, c.ns) case watch.Deleted: - o.Delete(testResource, "test_namespace", c.name) + o.Delete(testResource, c.ns, c.name) } } wg.Wait() diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go index dea229c9182..a7b8c1c6e42 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go @@ -229,11 +229,12 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token } else if len(configAuthInfo.TokenFile) > 0 { - ts := restclient.NewCachedFileTokenSource(configAuthInfo.TokenFile) - if _, err := ts.Token(); err != nil { + tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) + if err != nil { return nil, err } - mergedConfig.WrapTransport = restclient.TokenSourceWrapTransport(ts) + mergedConfig.BearerToken = string(tokenBytes) + mergedConfig.BearerTokenFile = configAuthInfo.TokenFile } if len(configAuthInfo.Impersonate) > 0 { mergedConfig.Impersonate = restclient.ImpersonationConfig{ diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/client_config_test.go b/staging/src/k8s.io/client-go/tools/clientcmd/client_config_test.go index 6da850ed405..a13f08ae7d9 100644 --- 
a/staging/src/k8s.io/client-go/tools/clientcmd/client_config_test.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/client_config_test.go @@ -18,7 +18,6 @@ package clientcmd import ( "io/ioutil" - "net/http" "os" "reflect" "strings" @@ -334,19 +333,7 @@ func TestBasicTokenFile(t *testing.T) { t.Fatalf("Unexpected error: %v", err) } - var out *http.Request - clientConfig.WrapTransport(fakeTransport(func(req *http.Request) (*http.Response, error) { - out = req - return &http.Response{}, nil - })).RoundTrip(&http.Request{}) - - matchStringArg(token, strings.TrimPrefix(out.Header.Get("Authorization"), "Bearer "), t) -} - -type fakeTransport func(*http.Request) (*http.Response, error) - -func (ft fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) { - return ft(req) + matchStringArg(token, clientConfig.BearerToken, t) } func TestPrecedenceTokenFile(t *testing.T) { diff --git a/staging/src/k8s.io/client-go/transport/BUILD b/staging/src/k8s.io/client-go/transport/BUILD index dc1800681d3..643750e2563 100644 --- a/staging/src/k8s.io/client-go/transport/BUILD +++ b/staging/src/k8s.io/client-go/transport/BUILD @@ -11,9 +11,11 @@ go_test( srcs = [ "cache_test.go", "round_trippers_test.go", + "token_source_test.go", "transport_test.go", ], embed = [":go_default_library"], + deps = ["//vendor/golang.org/x/oauth2:go_default_library"], ) go_library( @@ -22,12 +24,14 @@ go_library( "cache.go", "config.go", "round_trippers.go", + "token_source.go", "transport.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/transport", importpath = "k8s.io/client-go/transport", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/transport/config.go b/staging/src/k8s.io/client-go/transport/config.go index 4081c23e7ff..acb126d8b09 100644 --- a/staging/src/k8s.io/client-go/transport/config.go +++ 
b/staging/src/k8s.io/client-go/transport/config.go @@ -39,6 +39,11 @@ type Config struct { // Bearer token for authentication BearerToken string + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. + BearerTokenFile string + // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig @@ -80,7 +85,7 @@ func (c *Config) HasBasicAuth() bool { // HasTokenAuth returns whether the configuration has token authentication or not. func (c *Config) HasTokenAuth() bool { - return len(c.BearerToken) != 0 + return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0 } // HasCertAuth returns whether the configuration has certificate authentication or not. diff --git a/staging/src/k8s.io/client-go/transport/round_trippers.go b/staging/src/k8s.io/client-go/transport/round_trippers.go index da417cf96ea..117a9c8c4de 100644 --- a/staging/src/k8s.io/client-go/transport/round_trippers.go +++ b/staging/src/k8s.io/client-go/transport/round_trippers.go @@ -22,6 +22,7 @@ import ( "strings" "time" + "golang.org/x/oauth2" "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -44,7 +45,11 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip case config.HasBasicAuth() && config.HasTokenAuth(): return nil, fmt.Errorf("username/password or bearer token may be set, but not both") case config.HasTokenAuth(): - rt = NewBearerAuthRoundTripper(config.BearerToken, rt) + var err error + rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt) + if err != nil { + return nil, err + } case config.HasBasicAuth(): rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) } @@ -265,13 +270,35 @@ func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { r type bearerAuthRoundTripper struct { bearer string + source oauth2.TokenSource rt 
http.RoundTripper } // NewBearerAuthRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { - return &bearerAuthRoundTripper{bearer, rt} + return &bearerAuthRoundTripper{bearer, nil, rt} +} + +// NewBearerAuthRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +// If tokenFile is non-empty, it is periodically read, +// and the last successfully read content is used as the bearer token. +// If tokenFile is non-empty and bearer is empty, the tokenFile is read +// immediately to populate the initial bearer token. +func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { + if len(tokenFile) == 0 { + return &bearerAuthRoundTripper{bearer, nil, rt}, nil + } + source := NewCachedFileTokenSource(tokenFile) + if len(bearer) == 0 { + token, err := source.Token() + if err != nil { + return nil, err + } + bearer = token.AccessToken + } + return &bearerAuthRoundTripper{bearer, source, rt}, nil } func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -280,7 +307,13 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, } req = utilnet.CloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) + token := rt.bearer + if rt.source != nil { + if refreshedToken, err := rt.source.Token(); err == nil { + token = refreshedToken.AccessToken + } + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return rt.rt.RoundTrip(req) } diff --git a/staging/src/k8s.io/client-go/rest/token_source.go b/staging/src/k8s.io/client-go/transport/token_source.go similarity index 99% rename from staging/src/k8s.io/client-go/rest/token_source.go rename to staging/src/k8s.io/client-go/transport/token_source.go index 
c251b5eb0bb..818baffd4e6 100644 --- a/staging/src/k8s.io/client-go/rest/token_source.go +++ b/staging/src/k8s.io/client-go/transport/token_source.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rest +package transport import ( "fmt" diff --git a/staging/src/k8s.io/client-go/rest/token_source_test.go b/staging/src/k8s.io/client-go/transport/token_source_test.go similarity index 99% rename from staging/src/k8s.io/client-go/rest/token_source_test.go rename to staging/src/k8s.io/client-go/transport/token_source_test.go index 40851f80d71..a222495b94e 100644 --- a/staging/src/k8s.io/client-go/rest/token_source_test.go +++ b/staging/src/k8s.io/client-go/transport/token_source_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rest +package transport import ( "fmt" diff --git a/staging/src/k8s.io/client-go/util/certificate/certificate_manager.go b/staging/src/k8s.io/client-go/util/certificate/certificate_manager.go index ed74559e203..4aa9f3ab4a7 100644 --- a/staging/src/k8s.io/client-go/util/certificate/certificate_manager.go +++ b/staging/src/k8s.io/client-go/util/certificate/certificate_manager.go @@ -48,11 +48,10 @@ var certificateWaitBackoff = wait.Backoff{Duration: 30 * time.Second, Steps: 4, // manager. In the background it communicates with the API server to get new // certificates for certificates about to expire. type Manager interface { - // CertificateSigningRequestClient sets the client interface that is used for - // signing new certificates generated as part of rotation. - SetCertificateSigningRequestClient(certificatesclient.CertificateSigningRequestInterface) error // Start the API server status sync loop. Start() + // Stop the cert manager loop. 
+ Stop() // Current returns the currently selected certificate from the // certificate manager, as well as the associated certificate and key data // in PEM format. @@ -67,11 +66,11 @@ type Manager interface { // Config is the set of configuration parameters available for a new Manager. type Config struct { - // CertificateSigningRequestClient will be used for signing new certificate - // requests generated when a key rotation occurs. It must be set either at - // initialization or by using CertificateSigningRequestClient before - // Manager.Start() is called. - CertificateSigningRequestClient certificatesclient.CertificateSigningRequestInterface + // ClientFn will be used to create a client for + // signing new certificate requests generated when a key rotation occurs. + // It must be set at initialization. The function will never be invoked + // in parallel. It is passed the current client certificate if one exists. + ClientFn CSRClientFunc // Template is the CertificateRequest that will be used as a template for // generating certificate signing requests for all new keys generated as // part of rotation. It follows the same rules as the template parameter of @@ -141,21 +140,34 @@ type Gauge interface { // NoCertKeyError indicates there is no cert/key currently available. type NoCertKeyError string +// CSRClientFunc returns a new client for requesting CSRs. It passes the +// current certificate if one is available and valid. 
+type CSRClientFunc func(current *tls.Certificate) (certificatesclient.CertificateSigningRequestInterface, error) + func (e *NoCertKeyError) Error() string { return string(*e) } type manager struct { - certSigningRequestClient certificatesclient.CertificateSigningRequestInterface - getTemplate func() *x509.CertificateRequest - lastRequestLock sync.Mutex - lastRequest *x509.CertificateRequest - dynamicTemplate bool - usages []certificates.KeyUsage - certStore Store - certAccessLock sync.RWMutex - cert *tls.Certificate - forceRotation bool - certificateExpiration Gauge - serverHealth bool + getTemplate func() *x509.CertificateRequest + lastRequestLock sync.Mutex + lastRequest *x509.CertificateRequest + dynamicTemplate bool + usages []certificates.KeyUsage + forceRotation bool + + certStore Store + + certificateExpiration Gauge + + // the following variables must only be accessed under certAccessLock + certAccessLock sync.RWMutex + cert *tls.Certificate + serverHealth bool + + // the clientFn must only be accessed under the clientAccessLock + clientAccessLock sync.Mutex + clientFn CSRClientFunc + stopCh chan struct{} + stopped bool } // NewManager returns a new certificate manager. 
A certificate manager is @@ -176,14 +188,15 @@ func NewManager(config *Config) (Manager, error) { } m := manager{ - certSigningRequestClient: config.CertificateSigningRequestClient, - getTemplate: getTemplate, - dynamicTemplate: config.GetTemplate != nil, - usages: config.Usages, - certStore: config.CertificateStore, - cert: cert, - forceRotation: forceRotation, - certificateExpiration: config.CertificateExpiration, + stopCh: make(chan struct{}), + clientFn: config.ClientFn, + getTemplate: getTemplate, + dynamicTemplate: config.GetTemplate != nil, + usages: config.Usages, + certStore: config.CertificateStore, + cert: cert, + forceRotation: forceRotation, + certificateExpiration: config.CertificateExpiration, } return &m, nil @@ -192,10 +205,14 @@ func NewManager(config *Config) (Manager, error) { // Current returns the currently selected certificate from the certificate // manager. This can be nil if the manager was initialized without a // certificate and has not yet received one from the -// CertificateSigningRequestClient. +// CertificateSigningRequestClient, or if the current cert has expired. func (m *manager) Current() *tls.Certificate { m.certAccessLock.RLock() defer m.certAccessLock.RUnlock() + if m.cert != nil && m.cert.Leaf != nil && time.Now().After(m.cert.Leaf.NotAfter) { + klog.V(2).Infof("Current certificate is expired.") + return nil + } return m.cert } @@ -207,18 +224,15 @@ func (m *manager) ServerHealthy() bool { return m.serverHealth } -// SetCertificateSigningRequestClient sets the client interface that is used -// for signing new certificates generated as part of rotation. It must be -// called before Start() and can not be used to change the -// CertificateSigningRequestClient that has already been set. This method is to -// support the one specific scenario where the CertificateSigningRequestClient -// uses the CertificateManager. 
-func (m *manager) SetCertificateSigningRequestClient(certSigningRequestClient certificatesclient.CertificateSigningRequestInterface) error { - if m.certSigningRequestClient == nil { - m.certSigningRequestClient = certSigningRequestClient - return nil +// Stop terminates the manager. +func (m *manager) Stop() { + m.clientAccessLock.Lock() + defer m.clientAccessLock.Unlock() + if m.stopped { + return } - return fmt.Errorf("property CertificateSigningRequestClient is already set") + close(m.stopCh) + m.stopped = true } // Start will start the background work of rotating the certificates. @@ -226,7 +240,7 @@ func (m *manager) Start() { // Certificate rotation depends on access to the API server certificate // signing API, so don't start the certificate manager if we don't have a // client. - if m.certSigningRequestClient == nil { + if m.clientFn == nil { klog.V(2).Infof("Certificate rotation is not enabled, no connection to the apiserver.") return } @@ -234,7 +248,7 @@ func (m *manager) Start() { klog.V(2).Infof("Certificate rotation is enabled.") templateChanged := make(chan struct{}) - go wait.Forever(func() { + go wait.Until(func() { deadline := m.nextRotationDeadline() if sleepInterval := deadline.Sub(time.Now()); sleepInterval > 0 { klog.V(2).Infof("Waiting %v for next certificate rotation", sleepInterval) @@ -269,17 +283,17 @@ func (m *manager) Start() { utilruntime.HandleError(fmt.Errorf("Reached backoff limit, still unable to rotate certs: %v", err)) wait.PollInfinite(32*time.Second, m.rotateCerts) } - }, time.Second) + }, time.Second, m.stopCh) if m.dynamicTemplate { - go wait.Forever(func() { + go wait.Until(func() { // check if the current template matches what we last requested if !m.certSatisfiesTemplate() && !reflect.DeepEqual(m.getLastRequest(), m.getTemplate()) { // if the template is different, queue up an interrupt of the rotation deadline loop. 
// if we've requested a CSR that matches the new template by the time the interrupt is handled, the interrupt is disregarded. templateChanged <- struct{}{} } - }, time.Second) + }, time.Second, m.stopCh) } } @@ -327,11 +341,26 @@ func getCurrentCertificateOrBootstrap( return &bootstrapCert, true, nil } +func (m *manager) getClient() (certificatesclient.CertificateSigningRequestInterface, error) { + current := m.Current() + m.clientAccessLock.Lock() + defer m.clientAccessLock.Unlock() + return m.clientFn(current) +} + +// RotateCerts is exposed for testing only and is not a part of the public interface. +// Returns true if it changed the cert, false otherwise. Error is only returned in +// exceptional cases. +func (m *manager) RotateCerts() (bool, error) { + return m.rotateCerts() +} + // rotateCerts attempts to request a client cert from the server, wait a reasonable // period of time for it to be signed, and then update the cert on disk. If it cannot // retrieve a cert, it will return false. It will only return error in exceptional cases. // This method also keeps track of "server health" by interpreting the responses it gets // from the server on the various calls it makes. +// TODO: return errors, have callers handle and log them correctly func (m *manager) rotateCerts() (bool, error) { klog.V(2).Infof("Rotating certificates") @@ -341,9 +370,16 @@ func (m *manager) rotateCerts() (bool, error) { return false, nil } + // request the client each time + client, err := m.getClient() + if err != nil { + utilruntime.HandleError(fmt.Errorf("Unable to load a client to request certificates: %v", err)) + return false, nil + } + // Call the Certificate Signing Request API to get a certificate for the // new private key. 
- req, err := csr.RequestCertificate(m.certSigningRequestClient, csrPEM, "", m.usages, privateKey) + req, err := csr.RequestCertificate(client, csrPEM, "", m.usages, privateKey) if err != nil { utilruntime.HandleError(fmt.Errorf("Failed while requesting a signed certificate from the master: %v", err)) return false, m.updateServerError(err) @@ -359,7 +395,7 @@ func (m *manager) rotateCerts() (bool, error) { var crtPEM []byte watchDuration := time.Minute if err := wait.ExponentialBackoff(certificateWaitBackoff, func() (bool, error) { - data, err := csr.WaitForCertificate(m.certSigningRequestClient, req, watchDuration) + data, err := csr.WaitForCertificate(client, req, watchDuration) switch { case err == nil: crtPEM = data diff --git a/staging/src/k8s.io/client-go/util/certificate/certificate_manager_test.go b/staging/src/k8s.io/client-go/util/certificate/certificate_manager_test.go index 545097ea453..3ca7f767a01 100644 --- a/staging/src/k8s.io/client-go/util/certificate/certificate_manager_test.go +++ b/staging/src/k8s.io/client-go/util/certificate/certificate_manager_test.go @@ -60,6 +60,23 @@ iQIgZX08DA8VfvcA5/Xj1Zjdey9FVY6POLXen6RPiabE97UCICp6eUW7ht+2jjar e35EltCRCjoejRHTuN9TC0uCoVipAiAXaJIx/Q47vGwiw6Y8KXsNU6y54gTbOSxX 54LzHNk/+Q== -----END RSA PRIVATE KEY-----`) +var expiredStoreCertData = newCertificateData(`-----BEGIN CERTIFICATE----- +MIIBFzCBwgIJALhygXnxXmN1MA0GCSqGSIb3DQEBCwUAMBMxETAPBgNVBAMMCGhv +c3QtMTIzMB4XDTE4MTEwNDIzNTc1NFoXDTE4MTEwNTIzNTc1NFowEzERMA8GA1UE +AwwIaG9zdC0xMjMwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAtBMa7NWpv3BVlKTC +PGO/LEsguKqWHBtKzweMY2CVtAL1rQm913huhxF9w+ai76KQ3MHK5IVnLJjYYA5M +zP2H5QIDAQABMA0GCSqGSIb3DQEBCwUAA0EAN2DPFUtCzqnidL+5nh+46Sk6dkMI +T5DD11UuuIjZusKvThsHKVCIsyJ2bDo7cTbI+/nklLRP+FcC2wESFUgXbA== +-----END CERTIFICATE-----`, `-----BEGIN RSA PRIVATE KEY----- +MIIBUwIBADANBgkqhkiG9w0BAQEFAASCAT0wggE5AgEAAkEAtBMa7NWpv3BVlKTC +PGO/LEsguKqWHBtKzweMY2CVtAL1rQm913huhxF9w+ai76KQ3MHK5IVnLJjYYA5M 
+zP2H5QIDAQABAkAS9BfXab3OKpK3bIgNNyp+DQJKrZnTJ4Q+OjsqkpXvNltPJosf +G8GsiKu/vAt4HGqI3eU77NvRI+mL4MnHRmXBAiEA3qM4FAtKSRBbcJzPxxLEUSwg +XSCcosCktbkXvpYrS30CIQDPDxgqlwDEJQ0uKuHkZI38/SPWWqfUmkecwlbpXABK +iQIgZX08DA8VfvcA5/Xj1Zjdey9FVY6POLXen6RPiabE97UCICp6eUW7ht+2jjar +e35EltCRCjoejRHTuN9TC0uCoVipAiAXaJIx/Q47vGwiw6Y8KXsNU6y54gTbOSxX +54LzHNk/+Q== +-----END RSA PRIVATE KEY-----`) var bootstrapCertData = newCertificateData( `-----BEGIN CERTIFICATE----- MIICRzCCAfGgAwIBAgIJANXr+UzRFq4TMA0GCSqGSIb3DQEBCwUAMH4xCzAJBgNV @@ -388,8 +405,8 @@ func TestRotateCertCreateCSRError(t *testing.T) { }, getTemplate: func() *x509.CertificateRequest { return &x509.CertificateRequest{} }, usages: []certificates.KeyUsage{}, - certSigningRequestClient: fakeClient{ - failureType: createError, + clientFn: func(_ *tls.Certificate) (certificatesclient.CertificateSigningRequestInterface, error) { + return fakeClient{failureType: createError}, nil }, } @@ -411,8 +428,8 @@ func TestRotateCertWaitingForResultError(t *testing.T) { }, getTemplate: func() *x509.CertificateRequest { return &x509.CertificateRequest{} }, usages: []certificates.KeyUsage{}, - certSigningRequestClient: fakeClient{ - failureType: watchError, + clientFn: func(_ *tls.Certificate) (certificatesclient.CertificateSigningRequestInterface, error) { + return fakeClient{failureType: watchError}, nil }, } @@ -598,6 +615,14 @@ func TestInitializeCertificateSigningRequestClient(t *testing.T) { expectedCertBeforeStart: storeCertData, expectedCertAfterStart: storeCertData, }, + { + description: "Current certificate expired, no bootstrap certificate", + storeCert: expiredStoreCertData, + bootstrapCert: nilCertificate, + apiCert: apiServerCertData, + expectedCertBeforeStart: nil, + expectedCertAfterStart: apiServerCertData, + }, } for _, tc := range testCases { @@ -621,19 +646,25 @@ func TestInitializeCertificateSigningRequestClient(t *testing.T) { CertificateStore: certificateStore, BootstrapCertificatePEM: tc.bootstrapCert.certificatePEM, 
BootstrapKeyPEM: tc.bootstrapCert.keyPEM, + ClientFn: func(_ *tls.Certificate) (certificatesclient.CertificateSigningRequestInterface, error) { + return &fakeClient{ + certificatePEM: tc.apiCert.certificatePEM, + }, nil + }, }) if err != nil { t.Errorf("Got %v, wanted no error.", err) } certificate := certificateManager.Current() - if !certificatesEqual(certificate, tc.expectedCertBeforeStart.certificate) { - t.Errorf("Got %v, wanted %v", certificateString(certificate), certificateString(tc.expectedCertBeforeStart.certificate)) - } - if err := certificateManager.SetCertificateSigningRequestClient(&fakeClient{ - certificatePEM: tc.apiCert.certificatePEM, - }); err != nil { - t.Errorf("Got error %v, expected none.", err) + if tc.expectedCertBeforeStart == nil { + if certificate != nil { + t.Errorf("Expected certificate to be nil, was %s", certificate.Leaf.NotAfter) + } + } else { + if !certificatesEqual(certificate, tc.expectedCertBeforeStart.certificate) { + t.Errorf("Got %v, wanted %v", certificateString(certificate), certificateString(tc.expectedCertBeforeStart.certificate)) + } } if m, ok := certificateManager.(*manager); !ok { @@ -649,6 +680,12 @@ func TestInitializeCertificateSigningRequestClient(t *testing.T) { } certificate = certificateManager.Current() + if tc.expectedCertAfterStart == nil { + if certificate != nil { + t.Errorf("Expected certificate to be nil, was %s", certificate.Leaf.NotAfter) + } + return + } if !certificatesEqual(certificate, tc.expectedCertAfterStart.certificate) { t.Errorf("Got %v, wanted %v", certificateString(certificate), certificateString(tc.expectedCertAfterStart.certificate)) } @@ -721,8 +758,10 @@ func TestInitializeOtherRESTClients(t *testing.T) { CertificateStore: certificateStore, BootstrapCertificatePEM: tc.bootstrapCert.certificatePEM, BootstrapKeyPEM: tc.bootstrapCert.keyPEM, - CertificateSigningRequestClient: &fakeClient{ - certificatePEM: tc.apiCert.certificatePEM, + ClientFn: func(_ *tls.Certificate) 
(certificatesclient.CertificateSigningRequestInterface, error) { + return &fakeClient{ + certificatePEM: tc.apiCert.certificatePEM, + }, nil }, }) if err != nil { @@ -873,10 +912,12 @@ func TestServerHealth(t *testing.T) { CertificateStore: certificateStore, BootstrapCertificatePEM: tc.bootstrapCert.certificatePEM, BootstrapKeyPEM: tc.bootstrapCert.keyPEM, - CertificateSigningRequestClient: &fakeClient{ - certificatePEM: tc.apiCert.certificatePEM, - failureType: tc.failureType, - err: tc.clientErr, + ClientFn: func(_ *tls.Certificate) (certificatesclient.CertificateSigningRequestInterface, error) { + return &fakeClient{ + certificatePEM: tc.apiCert.certificatePEM, + failureType: tc.failureType, + err: tc.clientErr, + }, nil }, }) if err != nil { diff --git a/staging/src/k8s.io/client-go/util/workqueue/BUILD b/staging/src/k8s.io/client-go/util/workqueue/BUILD index ff0e87a7c75..a298ba0710f 100644 --- a/staging/src/k8s.io/client-go/util/workqueue/BUILD +++ b/staging/src/k8s.io/client-go/util/workqueue/BUILD @@ -13,7 +13,7 @@ go_test( "delaying_queue_test.go", "metrics_test.go", "queue_test.go", - "rate_limitting_queue_test.go", + "rate_limiting_queue_test.go", ], embed = [":go_default_library"], deps = [ @@ -31,7 +31,7 @@ go_library( "metrics.go", "parallelizer.go", "queue.go", - "rate_limitting_queue.go", + "rate_limiting_queue.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/util/workqueue", importpath = "k8s.io/client-go/util/workqueue", diff --git a/staging/src/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/staging/src/k8s.io/client-go/util/workqueue/default_rate_limiters.go index a5bed29e007..95c52db8413 100644 --- a/staging/src/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/staging/src/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -35,7 +35,7 @@ type RateLimiter interface { } // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. 
It has -// both overall and per-item rate limitting. The overall is a token bucket and the per-item is exponential +// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential func DefaultControllerRateLimiter() RateLimiter { return NewMaxOfRateLimiter( NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), diff --git a/staging/src/k8s.io/client-go/util/workqueue/rate_limitting_queue.go b/staging/src/k8s.io/client-go/util/workqueue/rate_limiting_queue.go similarity index 100% rename from staging/src/k8s.io/client-go/util/workqueue/rate_limitting_queue.go rename to staging/src/k8s.io/client-go/util/workqueue/rate_limiting_queue.go diff --git a/staging/src/k8s.io/client-go/util/workqueue/rate_limitting_queue_test.go b/staging/src/k8s.io/client-go/util/workqueue/rate_limiting_queue_test.go similarity index 100% rename from staging/src/k8s.io/client-go/util/workqueue/rate_limitting_queue_test.go rename to staging/src/k8s.io/client-go/util/workqueue/rate_limiting_queue_test.go diff --git a/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json b/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json index bcaec70c09a..bafa5b124a5 100644 --- a/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json +++ b/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json @@ -76,7 +76,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/modern-go/concurrent", diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/BUILD index cf386272f1a..36dcab0604a 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/BUILD @@ -13,7 +13,6 @@ go_library( "generator_for_expansion.go", "generator_for_group.go", "generator_for_type.go", - 
"tags.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/code-generator/cmd/client-gen/generators", importpath = "k8s.io/code-generator/cmd/client-gen/generators", @@ -24,6 +23,7 @@ go_library( "//staging/src/k8s.io/code-generator/cmd/client-gen/generators/util:go_default_library", "//staging/src/k8s.io/code-generator/cmd/client-gen/path:go_default_library", "//staging/src/k8s.io/code-generator/cmd/client-gen/types:go_default_library", + "//staging/src/k8s.io/code-generator/pkg/namer:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index ee6ebbcf093..18980744f01 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -27,6 +27,7 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" "k8s.io/code-generator/cmd/client-gen/path" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + codegennamer "k8s.io/code-generator/pkg/namer" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" @@ -101,7 +102,7 @@ func NameSystems() namer.NameSystems { "publicPlural": publicPluralNamer, "privatePlural": privatePluralNamer, "allLowercasePlural": lowercaseNamer, - "resource": NewTagOverrideNamer("resourceName", lowercaseNamer), + "resource": codegennamer.NewTagOverrideNamer("resourceName", lowercaseNamer), } } @@ -400,27 +401,3 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat return generator.Packages(packageList) } - -// tagOverrideNamer is a namer which pulls names from a given tag, if specified, -// and otherwise falls back to a different namer. 
-type tagOverrideNamer struct { - tagName string - fallback namer.Namer -} - -func (n *tagOverrideNamer) Name(t *types.Type) string { - if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" { - return nameOverride - } - - return n.fallback.Name(t) -} - -// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as -// the name, or falls back to another Namer if the tag is not present. -func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { - return &tagOverrideNamer{ - tagName: tagName, - fallback: fallback, - } -} diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD index 9902386d81b..1717edd83f8 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD @@ -24,6 +24,7 @@ go_library( "//staging/src/k8s.io/code-generator/cmd/client-gen/generators/util:go_default_library", "//staging/src/k8s.io/code-generator/cmd/client-gen/types:go_default_library", "//staging/src/k8s.io/code-generator/cmd/informer-gen/args:go_default_library", + "//staging/src/k8s.io/code-generator/pkg/namer:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/generic.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/generic.go index 54632de0530..5ee918f17ea 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/generic.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/generic.go @@ -22,6 +22,7 @@ import ( "strings" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + codegennamer "k8s.io/code-generator/pkg/namer" "k8s.io/gengo/generator" 
"k8s.io/gengo/namer" "k8s.io/gengo/types" @@ -56,6 +57,7 @@ func (g *genericGenerator) Namers(c *generator.Context) namer.NameSystems { "raw": namer.NewRawNamer(g.outputPackage, g.imports), "allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions), "publicPlural": namer.NewPublicPluralNamer(pluralExceptions), + "resource": codegennamer.NewTagOverrideNamer("resourceName", namer.NewAllLowercasePluralNamer(pluralExceptions)), } } @@ -168,7 +170,7 @@ func (f *sharedInformerFactory) ForResource(resource {{.schemaGroupVersionResour {{range $version := .Versions -}} // Group={{$group.Name}}, Version={{.Name}} {{range .Resources -}} - case {{index $.schemeGVs $version|raw}}.WithResource("{{.|allLowercasePlural}}"): + case {{index $.schemeGVs $version|raw}}.WithResource("{{.|resource}}"): return &genericInformer{resource: resource.GroupResource(), informer: f.{{$GroupGoName}}().{{$version.GoName}}().{{.|publicPlural}}().Informer()}, nil {{end}} {{end}} diff --git a/staging/src/k8s.io/code-generator/pkg/namer/BUILD b/staging/src/k8s.io/code-generator/pkg/namer/BUILD new file mode 100644 index 00000000000..cbed26a69b6 --- /dev/null +++ b/staging/src/k8s.io/code-generator/pkg/namer/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["tag-override.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/code-generator/pkg/namer", + importpath = "k8s.io/code-generator/pkg/namer", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/gengo/namer:go_default_library", + "//vendor/k8s.io/gengo/types:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/tags.go 
b/staging/src/k8s.io/code-generator/pkg/namer/tag-override.go similarity index 50% rename from staging/src/k8s.io/code-generator/cmd/client-gen/generators/tags.go rename to staging/src/k8s.io/code-generator/pkg/namer/tag-override.go index b0040810368..fd8c3a8553c 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/tags.go +++ b/staging/src/k8s.io/code-generator/pkg/namer/tag-override.go @@ -14,12 +14,38 @@ See the License for the specific language governing permissions and limitations under the License. */ -package generators +package namer import ( + "k8s.io/gengo/namer" "k8s.io/gengo/types" ) +// TagOverrideNamer is a namer which pulls names from a given tag, if specified, +// and otherwise falls back to a different namer. +type TagOverrideNamer struct { + tagName string + fallback namer.Namer +} + +// Name returns the tag value if it exists. It no tag was found the fallback namer will be used +func (n *TagOverrideNamer) Name(t *types.Type) string { + if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" { + return nameOverride + } + + return n.fallback.Name(t) +} + +// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as +// the name, or falls back to another Namer if the tag is not present. +func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { + return &TagOverrideNamer{ + tagName: tagName, + fallback: fallback, + } +} + // extractTag gets the comment-tags for the key. If the tag did not exist, it // returns the empty string. 
func extractTag(key string, lines []string) string { diff --git a/staging/src/k8s.io/csi-api/Godeps/Godeps.json b/staging/src/k8s.io/csi-api/Godeps/Godeps.json index 6949a7fa26c..27d670bb57c 100644 --- a/staging/src/k8s.io/csi-api/Godeps/Godeps.json +++ b/staging/src/k8s.io/csi-api/Godeps/Godeps.json @@ -80,7 +80,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/modern-go/concurrent", diff --git a/staging/src/k8s.io/csi-api/SECURITY_CONTACTS b/staging/src/k8s.io/csi-api/SECURITY_CONTACTS index b51da450fc0..585f480fee6 100644 --- a/staging/src/k8s.io/csi-api/SECURITY_CONTACTS +++ b/staging/src/k8s.io/csi-api/SECURITY_CONTACTS @@ -10,4 +10,4 @@ # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ -saadali \ No newline at end of file +saad-ali diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index a617489e581..fc1ccad901e 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -216,7 +216,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/mailru/easyjson/buffer", diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go index ab1f40cdc1f..488ce21330e 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go @@ -98,16 +98,20 @@ func NewLocalAvailableAPIServiceCondition() APIServiceCondition { } } +// GetAPIServiceConditionByType gets an *APIServiceCondition by 
APIServiceConditionType if present +func GetAPIServiceConditionByType(apiService *APIService, conditionType APIServiceConditionType) *APIServiceCondition { + for i := range apiService.Status.Conditions { + if apiService.Status.Conditions[i].Type == conditionType { + return &apiService.Status.Conditions[i] + } + } + return nil +} + // SetAPIServiceCondition sets the status condition. It either overwrites the existing one or // creates a new one func SetAPIServiceCondition(apiService *APIService, newCondition APIServiceCondition) { - var existingCondition *APIServiceCondition - for i := range apiService.Status.Conditions { - if apiService.Status.Conditions[i].Type == newCondition.Type { - existingCondition = &apiService.Status.Conditions[i] - break - } - } + existingCondition := GetAPIServiceConditionByType(apiService, newCondition.Type) if existingCondition == nil { apiService.Status.Conditions = append(apiService.Status.Conditions, newCondition) return @@ -124,10 +128,6 @@ func SetAPIServiceCondition(apiService *APIService, newCondition APIServiceCondi // IsAPIServiceConditionTrue indicates if the condition is present and strictly true func IsAPIServiceConditionTrue(apiService *APIService, conditionType APIServiceConditionType) bool { - for _, condition := range apiService.Status.Conditions { - if condition.Type == conditionType && condition.Status == ConditionTrue { - return true - } - } - return false + condition := GetAPIServiceConditionByType(apiService, conditionType) + return condition != nil && condition.Status == ConditionTrue } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers_test.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers_test.go index 7bca228bdc0..59b0ed61dee 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers_test.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers_test.go @@ -21,6 +21,124 @@ import ( "testing" ) +var ( + a 
APIServiceConditionType = "A" + b APIServiceConditionType = "B" + c APIServiceConditionType = "C" +) + +func TestGetAPIServiceConditionByType(t *testing.T) { + conditionA := makeNewAPIServiceCondition(a, "a reason", "a message", ConditionTrue) + conditionB := makeNewAPIServiceCondition(b, "b reason", "b message", ConditionTrue) + tests := []*struct { + name string + apiService *APIService + conditionType APIServiceConditionType + expectedCondition *APIServiceCondition + }{ + { + name: "Should find a matching condition from apiService", + apiService: makeNewApiService("v1", 100, conditionA, conditionB), + conditionType: a, + expectedCondition: &conditionA, + }, + { + name: "Should not find a matching condition", + apiService: makeNewApiService("v1", 100, conditionA), + conditionType: b, + expectedCondition: nil, + }, + } + + for _, tc := range tests { + actual := GetAPIServiceConditionByType(tc.apiService, tc.conditionType) + if !reflect.DeepEqual(tc.expectedCondition, actual) { + t.Errorf("expected %s, actual %s", tc.expectedCondition, actual) + } + } +} + +func TestIsAPIServiceConditionTrue(t *testing.T) { + conditionATrue := makeNewAPIServiceCondition(a, "a reason", "a message", ConditionTrue) + conditionAFalse := makeNewAPIServiceCondition(a, "a reason", "a message", ConditionFalse) + tests := []*struct { + name string + apiService *APIService + conditionType APIServiceConditionType + expected bool + }{ + { + name: "Should return false when condition of type is not present", + apiService: makeNewApiService("v1", 100), + conditionType: a, + expected: false, + }, + { + name: "Should return false when condition of type is present but status is not ConditionTrue", + apiService: makeNewApiService("v1", 100, conditionAFalse), + conditionType: a, + expected: false, + }, + { + name: "Should return false when condition of type is present but status is not ConditionTrue", + apiService: makeNewApiService("v1", 100, conditionATrue), + conditionType: a, + expected: true, + 
}, + } + + for _, tc := range tests { + if isConditionTrue := IsAPIServiceConditionTrue(tc.apiService, tc.conditionType); isConditionTrue != tc.expected { + t.Errorf("expected condition of type %v to be %v, actually was %v", + tc.conditionType, isConditionTrue, tc.expected) + + } + } +} + +func TestSetAPIServiceCondition(t *testing.T) { + conditionA1 := makeNewAPIServiceCondition(a, "a1 reason", "a1 message", ConditionTrue) + conditionA2 := makeNewAPIServiceCondition(a, "a2 reason", "a2 message", ConditionTrue) + tests := []*struct { + name string + apiService *APIService + conditionType APIServiceConditionType + initialCondition *APIServiceCondition + setCondition APIServiceCondition + expectedCondition *APIServiceCondition + }{ + { + name: "Should set a new condition with type where previously there was no condition of that type", + apiService: makeNewApiService("v1", 100), + conditionType: a, + initialCondition: nil, + setCondition: conditionA1, + expectedCondition: &conditionA1, + }, + { + name: "Should override a condition of type, when a condition of that type existed previously", + apiService: makeNewApiService("v1", 100, conditionA1), + conditionType: a, + initialCondition: &conditionA1, + setCondition: conditionA2, + expectedCondition: &conditionA2, + }, + } + + for _, tc := range tests { + startingCondition := GetAPIServiceConditionByType(tc.apiService, tc.conditionType) + if !reflect.DeepEqual(startingCondition, tc.initialCondition) { + t.Errorf("expected to find condition %s initially, actual was %s", tc.initialCondition, startingCondition) + + } + SetAPIServiceCondition(tc.apiService, tc.setCondition) + actual := GetAPIServiceConditionByType(tc.apiService, tc.setCondition.Type) + if !reflect.DeepEqual(actual, tc.expectedCondition) { + t.Errorf("expected %s, actual %s", tc.expectedCondition, actual) + } + } +} + func TestSortedAPIServicesByVersion(t *testing.T) { tests := []*struct { name string @@ -55,12 +173,12 @@ func TestSortedAPIServicesByVersion(t 
*testing.T) { } for _, tc := range tests { - apiServices := []*APIService{} + apiServices := make([]*APIService, 0) for _, v := range tc.versions { - apiServices = append(apiServices, &APIService{Spec: APIServiceSpec{Version: v, VersionPriority: 100}}) + apiServices = append(apiServices, makeNewApiService(v, 100)) } sortedServices := SortedByGroupAndVersion(apiServices) - actual := []string{} + actual := make([]string, 0) for _, s := range sortedServices[0] { actual = append(actual, s.Spec.Version) } @@ -69,3 +187,12 @@ func TestSortedAPIServicesByVersion(t *testing.T) { } } } + +func makeNewApiService(version string, priority int32, conditions ...APIServiceCondition) *APIService { + status := APIServiceStatus{Conditions: conditions} + return &APIService{Spec: APIServiceSpec{Version: version, VersionPriority: priority}, Status: status} +} + +func makeNewAPIServiceCondition(conditionType APIServiceConditionType, reason string, message string, status ConditionStatus) APIServiceCondition { + return APIServiceCondition{Type: conditionType, Reason: reason, Message: message, Status: status} +} diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD index 69af8089830..aa5763bd528 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD @@ -55,6 +55,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/features:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", diff --git 
a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go index e8976f07eff..e6e30bbd44b 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go @@ -30,6 +30,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/proxy" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + endpointmetrics "k8s.io/apiserver/pkg/endpoints/metrics" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" genericfeatures "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -38,6 +39,8 @@ import ( apiregistrationapi "k8s.io/kube-aggregator/pkg/apis/apiregistration" ) +const aggregatorComponent string = "aggregator" + // proxyHandler provides a http.Handler which will proxy traffic to locations // specified by items implementing Redirector. type proxyHandler struct { @@ -60,6 +63,8 @@ type proxyHandlingInfo struct { // local indicates that this APIService is locally satisfied local bool + // name is the name of the APIService + name string // restConfig holds the information for building a roundtripper restConfig *restclient.Config // transportBuildingError is an error produced while building the transport. If this @@ -75,6 +80,19 @@ type proxyHandlingInfo struct { serviceAvailable bool } +func proxyError(w http.ResponseWriter, req *http.Request, error string, code int) { + http.Error(w, error, code) + + ctx := req.Context() + info, ok := genericapirequest.RequestInfoFrom(ctx) + if !ok { + klog.Warning("no RequestInfo found in the context") + return + } + // TODO: record long-running request differently? 
The long-running check func does not necessarily match the one of the aggregated apiserver + endpointmetrics.Record(req, info, aggregatorComponent, "", code, 0, 0) +} + func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { value := r.handlingInfo.Load() if value == nil { @@ -92,18 +110,18 @@ func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } if !handlingInfo.serviceAvailable { - http.Error(w, "service unavailable", http.StatusServiceUnavailable) + proxyError(w, req, "service unavailable", http.StatusServiceUnavailable) return } if handlingInfo.transportBuildingError != nil { - http.Error(w, handlingInfo.transportBuildingError.Error(), http.StatusInternalServerError) + proxyError(w, req, handlingInfo.transportBuildingError.Error(), http.StatusInternalServerError) return } user, ok := genericapirequest.UserFrom(req.Context()) if !ok { - http.Error(w, "missing user", http.StatusInternalServerError) + proxyError(w, req, "missing user", http.StatusInternalServerError) return } @@ -113,7 +131,7 @@ func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { rloc, err := r.serviceResolver.ResolveEndpoint(handlingInfo.serviceNamespace, handlingInfo.serviceName) if err != nil { klog.Errorf("error resolving %s/%s: %v", handlingInfo.serviceNamespace, handlingInfo.serviceName, err) - http.Error(w, "service unavailable", http.StatusServiceUnavailable) + proxyError(w, req, "service unavailable", http.StatusServiceUnavailable) return } location.Host = rloc.Host @@ -126,14 +144,14 @@ func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { newReq.URL = location if handlingInfo.proxyRoundTripper == nil { - http.Error(w, "", http.StatusNotFound) + proxyError(w, req, "", http.StatusNotFound) return } // we need to wrap the roundtripper in another roundtripper which will apply the front proxy headers proxyRoundTripper, upgrade, err := maybeWrapForConnectionUpgrades(handlingInfo.restConfig, 
handlingInfo.proxyRoundTripper, req) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + proxyError(w, req, err.Error(), http.StatusInternalServerError) return } proxyRoundTripper = transport.NewAuthProxyRoundTripper(user.GetName(), user.GetGroups(), user.GetExtra(), proxyRoundTripper) @@ -195,6 +213,7 @@ func (r *proxyHandler) updateAPIService(apiService *apiregistrationapi.APIServic } newInfo := proxyHandlingInfo{ + name: apiService.Name, restConfig: &restclient.Config{ TLSClientConfig: restclient.TLSClientConfig{ Insecure: apiService.Spec.InsecureSkipTLSVerify, diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD index 3810051bc50..1026e9bd86a 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD @@ -8,11 +8,15 @@ load( go_library( name = "go_default_library", - srcs = ["available_controller.go"], + srcs = [ + "available_controller.go", + "metrics.go", + ], importmap = "k8s.io/kubernetes/vendor/k8s.io/kube-aggregator/pkg/controllers/status", importpath = "k8s.io/kube-aggregator/pkg/controllers/status", deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -29,6 +33,7 @@ go_library( "//staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/controllers:go_default_library", + 
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -46,6 +51,7 @@ go_test( "//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", ], ) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go index 66a4d0f2f75..364e00ae9fb 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go @@ -26,6 +26,7 @@ import ( "k8s.io/klog" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -140,7 +141,7 @@ func NewAvailableConditionController( } func (c *AvailableConditionController) sync(key string) error { - inAPIService, err := c.apiServiceLister.Get(key) + originalAPIService, err := c.apiServiceLister.Get(key) if apierrors.IsNotFound(err) { return nil } @@ -148,7 +149,7 @@ func (c *AvailableConditionController) sync(key string) error { return err } - apiService := inAPIService.DeepCopy() + apiService := originalAPIService.DeepCopy() availableCondition := apiregistration.APIServiceCondition{ Type: apiregistration.Available, @@ -159,7 +160,7 @@ func (c *AvailableConditionController) sync(key string) error { // local API services are always considered available if apiService.Spec.Service == nil { apiregistration.SetAPIServiceCondition(apiService, apiregistration.NewLocalAvailableAPIServiceCondition()) - _, err := 
c.apiServiceClient.APIServices().UpdateStatus(apiService) + _, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService) return err } @@ -169,14 +170,14 @@ func (c *AvailableConditionController) sync(key string) error { availableCondition.Reason = "ServiceNotFound" availableCondition.Message = fmt.Sprintf("service/%s in %q is not present", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace) apiregistration.SetAPIServiceCondition(apiService, availableCondition) - _, err := c.apiServiceClient.APIServices().UpdateStatus(apiService) + _, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService) return err } else if err != nil { availableCondition.Status = apiregistration.ConditionUnknown availableCondition.Reason = "ServiceAccessError" availableCondition.Message = fmt.Sprintf("service/%s in %q cannot be checked due to: %v", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, err) apiregistration.SetAPIServiceCondition(apiService, availableCondition) - _, err := c.apiServiceClient.APIServices().UpdateStatus(apiService) + _, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService) return err } @@ -193,7 +194,7 @@ func (c *AvailableConditionController) sync(key string) error { availableCondition.Reason = "ServicePortError" availableCondition.Message = fmt.Sprintf("service/%s in %q is not listening on port 443", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace) apiregistration.SetAPIServiceCondition(apiService, availableCondition) - _, err := c.apiServiceClient.APIServices().UpdateStatus(apiService) + _, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService) return err } @@ -203,14 +204,14 @@ func (c *AvailableConditionController) sync(key string) error { availableCondition.Reason = "EndpointsNotFound" availableCondition.Message = fmt.Sprintf("cannot find endpoints for service/%s in %q", apiService.Spec.Service.Name, 
apiService.Spec.Service.Namespace) apiregistration.SetAPIServiceCondition(apiService, availableCondition) - _, err := c.apiServiceClient.APIServices().UpdateStatus(apiService) + _, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService) return err } else if err != nil { availableCondition.Status = apiregistration.ConditionUnknown availableCondition.Reason = "EndpointsAccessError" availableCondition.Message = fmt.Sprintf("service/%s in %q cannot be checked due to: %v", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, err) apiregistration.SetAPIServiceCondition(apiService, availableCondition) - _, err := c.apiServiceClient.APIServices().UpdateStatus(apiService) + _, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService) return err } hasActiveEndpoints := false @@ -225,7 +226,7 @@ func (c *AvailableConditionController) sync(key string) error { availableCondition.Reason = "MissingEndpoints" availableCondition.Message = fmt.Sprintf("endpoints for service/%s in %q have no addresses", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace) apiregistration.SetAPIServiceCondition(apiService, availableCondition) - _, err := c.apiServiceClient.APIServices().UpdateStatus(apiService) + _, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService) return err } } @@ -259,7 +260,7 @@ func (c *AvailableConditionController) sync(key string) error { availableCondition.Reason = "FailedDiscoveryCheck" availableCondition.Message = fmt.Sprintf("no response from %v: %v", discoveryURL, err) apiregistration.SetAPIServiceCondition(apiService, availableCondition) - _, updateErr := c.apiServiceClient.APIServices().UpdateStatus(apiService) + _, updateErr := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService) if updateErr != nil { return updateErr } @@ -272,10 +273,42 @@ func (c *AvailableConditionController) sync(key string) error { availableCondition.Reason = "Passed" 
availableCondition.Message = "all checks passed" apiregistration.SetAPIServiceCondition(apiService, availableCondition) - _, err = c.apiServiceClient.APIServices().UpdateStatus(apiService) + _, err = updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService) return err } +// updateAPIServiceStatus only issues an update if a change is detected. We have a tight resync loop to quickly detect dead +// apiservices. Doing that means we don't want to quickly issue no-op updates. +func updateAPIServiceStatus(client apiregistrationclient.APIServicesGetter, originalAPIService, newAPIService *apiregistration.APIService) (*apiregistration.APIService, error) { + if equality.Semantic.DeepEqual(originalAPIService.Status, newAPIService.Status) { + return newAPIService, nil + } + + newAPIService, err := client.APIServices().UpdateStatus(newAPIService) + if err != nil { + return nil, err + } + + // update metrics + wasAvailable := apiregistration.IsAPIServiceConditionTrue(originalAPIService, apiregistration.Available) + isAvailable := apiregistration.IsAPIServiceConditionTrue(newAPIService, apiregistration.Available) + if isAvailable != wasAvailable { + if isAvailable { + unavailableGauge.WithLabelValues(newAPIService.Name).Set(0.0) + } else { + unavailableGauge.WithLabelValues(newAPIService.Name).Set(1.0) + + reason := "UnknownReason" + if newCondition := apiregistration.GetAPIServiceConditionByType(newAPIService, apiregistration.Available); newCondition != nil { + reason = newCondition.Reason + } + unavailableCounter.WithLabelValues(newAPIService.Name, reason).Inc() + } + } + + return newAPIService, nil +} + func (c *AvailableConditionController) Run(threadiness int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go index d82a0a0bd39..5feec0e183e 
100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go @@ -19,6 +19,8 @@ package apiserver import ( "testing" + "github.com/davecgh/go-spew/spew" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1listers "k8s.io/client-go/listers/core/v1" @@ -226,5 +228,26 @@ func TestSync(t *testing.T) { if e, a := tc.expectedAvailability.Message, condition.Message; e != a { t.Errorf("%v expected %v, got %#v", tc.name, e, condition) } + if condition.LastTransitionTime.IsZero() { + t.Error("expected lastTransitionTime to be non-zero") + } } } + +func TestUpdateAPIServiceStatus(t *testing.T) { + foo := &apiregistration.APIService{Status: apiregistration.APIServiceStatus{Conditions: []apiregistration.APIServiceCondition{{Type: "foo"}}}} + bar := &apiregistration.APIService{Status: apiregistration.APIServiceStatus{Conditions: []apiregistration.APIServiceCondition{{Type: "bar"}}}} + + fakeClient := fake.NewSimpleClientset() + updateAPIServiceStatus(fakeClient.Apiregistration(), foo, foo) + if e, a := 0, len(fakeClient.Actions()); e != a { + t.Error(spew.Sdump(fakeClient.Actions())) + } + + fakeClient.ClearActions() + updateAPIServiceStatus(fakeClient.Apiregistration(), foo, bar) + if e, a := 1, len(fakeClient.Actions()); e != a { + t.Error(spew.Sdump(fakeClient.Actions())) + } + +} diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go new file mode 100644 index 00000000000..c36527b1076 --- /dev/null +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiserver + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +var ( + unavailableCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "aggregator_unavailable_apiservice_count", + Help: "Counter of APIServices which are marked as unavailable broken down by APIService name and reason.", + }, + []string{"name", "reason"}, + ) + unavailableGauge = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "aggregator_unavailable_apiserver_gauge", + Help: "Gauge of APIServices which are marked as unavailable broken down by APIService name.", + }, + []string{"name"}, + ) +) + +func init() { + prometheus.MustRegister(unavailableCounter) + prometheus.MustRegister(unavailableGauge) +} diff --git a/staging/src/k8s.io/kube-controller-manager/Godeps/Godeps.json b/staging/src/k8s.io/kube-controller-manager/Godeps/Godeps.json index 8927874f37b..3df12ff4c03 100644 --- a/staging/src/k8s.io/kube-controller-manager/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-controller-manager/Godeps/Godeps.json @@ -160,7 +160,7 @@ }, { "ImportPath": "k8s.io/utils/pointer", - "Rev": "66066c83e385e385ccc3c964b44fd7dcd413d0ed" + "Rev": "8e7ff06bf0e2d3289061230af203e430a15b6dcc" } ] } diff --git a/staging/src/k8s.io/kube-scheduler/Godeps/Godeps.json b/staging/src/k8s.io/kube-scheduler/Godeps/Godeps.json index 99bbabbf4af..422e30596b0 100644 --- a/staging/src/k8s.io/kube-scheduler/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-scheduler/Godeps/Godeps.json @@ -160,7 +160,7 @@ }, { "ImportPath": "k8s.io/utils/pointer", - "Rev": 
"66066c83e385e385ccc3c964b44fd7dcd413d0ed" + "Rev": "8e7ff06bf0e2d3289061230af203e430a15b6dcc" } ] } diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index 668ad9186ba..8bdf3c82a38 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -72,7 +72,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/modern-go/concurrent", diff --git a/staging/src/k8s.io/sample-apiserver/BUILD b/staging/src/k8s.io/sample-apiserver/BUILD index c2dcbc96442..cf5033eb6ca 100644 --- a/staging/src/k8s.io/sample-apiserver/BUILD +++ b/staging/src/k8s.io/sample-apiserver/BUILD @@ -39,11 +39,8 @@ filegroup( "//staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer:all-srcs", "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle:all-srcs", "//staging/src/k8s.io/sample-apiserver/pkg/apiserver:all-srcs", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion:all-srcs", "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned:all-srcs", "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions:all-srcs", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion:all-srcs", - "//staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion:all-srcs", "//staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1:all-srcs", "//staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1beta1:all-srcs", "//staging/src/k8s.io/sample-apiserver/pkg/cmd/server:all-srcs", diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index 50a43f04b7e..4d0fcbdcb71 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ 
b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -208,7 +208,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/mailru/easyjson/buffer", diff --git a/staging/src/k8s.io/sample-apiserver/artifacts/example/rc.yaml b/staging/src/k8s.io/sample-apiserver/artifacts/example/rc.yaml index 9513a849484..3852a2273f7 100644 --- a/staging/src/k8s.io/sample-apiserver/artifacts/example/rc.yaml +++ b/staging/src/k8s.io/sample-apiserver/artifacts/example/rc.yaml @@ -21,4 +21,4 @@ spec: imagePullPolicy: Never command: [ "/kube-sample-apiserver", "--etcd-servers=http://localhost:2379" ] - name: etcd - image: quay.io/coreos/etcd:v3.2.24 + image: quay.io/coreos/etcd:v3.3.10 diff --git a/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh b/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh index 9dcc8f10242..98c9ae9e1aa 100755 --- a/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh +++ b/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh @@ -25,7 +25,13 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge # --output-base because this script should also be able to run inside the vendor dir of # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir # instead of the $GOPATH directly. For normal projects this can be dropped. -${CODEGEN_PKG}/generate-internal-groups.sh all \ +${CODEGEN_PKG}/generate-groups.sh all \ + k8s.io/sample-apiserver/pkg/client k8s.io/sample-apiserver/pkg/apis \ + "wardle:v1alpha1,v1beta1" \ + --output-base "$(dirname ${BASH_SOURCE})/../../.." 
\ + --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt + +${CODEGEN_PKG}/generate-internal-groups.sh "deepcopy,defaulter,conversion" \ k8s.io/sample-apiserver/pkg/client k8s.io/sample-apiserver/pkg/apis k8s.io/sample-apiserver/pkg/apis \ "wardle:v1alpha1,v1beta1" \ --output-base "$(dirname ${BASH_SOURCE})/../../.." \ diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/BUILD index 56a4f47e473..6a4c5dfea82 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/BUILD +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/BUILD @@ -18,8 +18,8 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer:go_default_library", "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion:go_default_library", + "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions:go_default_library", + "//staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1:go_default_library", ], ) @@ -34,9 +34,9 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion:go_default_library", + "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library", + 
"//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake:go_default_library", + "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions:go_default_library", ], ) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission.go index 4acfe6448ba..70d618614a9 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission.go @@ -26,8 +26,8 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/sample-apiserver/pkg/admission/wardleinitializer" "k8s.io/sample-apiserver/pkg/apis/wardle" - informers "k8s.io/sample-apiserver/pkg/client/informers/internalversion" - listers "k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion" + informers "k8s.io/sample-apiserver/pkg/client/informers/externalversions" + listers "k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1" ) // Register registers a plugin @@ -85,11 +85,11 @@ func (d *DisallowFlunder) Admit(a admission.Attributes) error { // SetInternalWardleInformerFactory gets Lister from SharedInformerFactory. // The lister knows how to lists Fischers. func (d *DisallowFlunder) SetInternalWardleInformerFactory(f informers.SharedInformerFactory) { - d.lister = f.Wardle().InternalVersion().Fischers().Lister() - d.SetReadyFunc(f.Wardle().InternalVersion().Fischers().Informer().HasSynced) + d.lister = f.Wardle().V1alpha1().Fischers().Lister() + d.SetReadyFunc(f.Wardle().V1alpha1().Fischers().Informer().HasSynced) } -// ValidaValidateInitializationte checks whether the plugin was correctly initialized. +// ValidateInitialization checks whether the plugin was correctly initialized. 
func (d *DisallowFlunder) ValidateInitialization() error { if d.lister == nil { return fmt.Errorf("missing fischer lister") diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go index 9023d82c281..b60ae0dbae8 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go @@ -27,9 +27,9 @@ import ( clienttesting "k8s.io/client-go/testing" "k8s.io/sample-apiserver/pkg/admission/plugin/banflunder" "k8s.io/sample-apiserver/pkg/admission/wardleinitializer" - "k8s.io/sample-apiserver/pkg/apis/wardle" - "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake" - informers "k8s.io/sample-apiserver/pkg/client/informers/internalversion" + wardle "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" + "k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake" + informers "k8s.io/sample-apiserver/pkg/client/informers/externalversions" ) // TestBanfluderAdmissionPlugin tests various test cases against @@ -56,7 +56,7 @@ func TestBanflunderAdmissionPlugin(t *testing.T) { Namespace: "", }, }, - admissionInputKind: wardle.Kind("Flunder").WithVersion("version"), + admissionInputKind: wardle.SchemeGroupVersion.WithKind("Flunder").GroupKind().WithVersion("version"), admissionInputResource: wardle.Resource("flunders").WithVersion("version"), admissionMustFail: true, }, @@ -74,7 +74,7 @@ func TestBanflunderAdmissionPlugin(t *testing.T) { Namespace: "", }, }, - admissionInputKind: wardle.Kind("Flunder").WithVersion("version"), + admissionInputKind: wardle.SchemeGroupVersion.WithKind("Flunder").GroupKind().WithVersion("version"), admissionInputResource: wardle.Resource("flunders").WithVersion("version"), admissionMustFail: false, }, @@ -93,7 +93,7 @@ func TestBanflunderAdmissionPlugin(t *testing.T) { Namespace: "", }, }, - 
admissionInputKind: wardle.Kind("NotFlunder").WithVersion("version"), + admissionInputKind: wardle.SchemeGroupVersion.WithKind("NotFlunder").GroupKind().WithVersion("version"), admissionInputResource: wardle.Resource("notflunders").WithVersion("version"), admissionMustFail: false, }, diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/BUILD index 11bb979dc6b..0d9f62e676a 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/BUILD +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/BUILD @@ -16,7 +16,7 @@ go_library( importpath = "k8s.io/sample-apiserver/pkg/admission/wardleinitializer", deps = [ "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion:go_default_library", + "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions:go_default_library", ], ) @@ -26,8 +26,8 @@ go_test( embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion:go_default_library", + "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake:go_default_library", + "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions:go_default_library", ], ) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/interfaces.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/interfaces.go index f5c6e41579b..248cd045d82 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/interfaces.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/interfaces.go @@ -18,7 +18,7 @@ package 
wardleinitializer import ( "k8s.io/apiserver/pkg/admission" - informers "k8s.io/sample-apiserver/pkg/client/informers/internalversion" + informers "k8s.io/sample-apiserver/pkg/client/informers/externalversions" ) // WantsInternalWardleInformerFactory defines a function which sets InformerFactory for admission plugins that need it diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go index b41e3dfba09..455d51c9eaa 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go @@ -18,7 +18,7 @@ package wardleinitializer import ( "k8s.io/apiserver/pkg/admission" - informers "k8s.io/sample-apiserver/pkg/client/informers/internalversion" + informers "k8s.io/sample-apiserver/pkg/client/informers/externalversions" ) type pluginInitializer struct { diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go index c64ed3ab3e1..ff6fee1a92f 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go @@ -22,8 +22,8 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/sample-apiserver/pkg/admission/wardleinitializer" - "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake" - informers "k8s.io/sample-apiserver/pkg/client/informers/internalversion" + "k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake" + informers "k8s.io/sample-apiserver/pkg/client/informers/externalversions" ) // TestWantsInternalWardleInformerFactory ensures that the informer factory is injected diff --git 
a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/BUILD deleted file mode 100644 index 86b1b0e4dd1..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/BUILD +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/sample-apiserver/pkg/client/clientset/internalversion", - importpath = "k8s.io/sample-apiserver/pkg/client/clientset/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/client-go/discovery:go_default_library", - "//staging/src/k8s.io/client-go/rest:go_default_library", - "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake:all-srcs", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme:all-srcs", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/clientset.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/clientset.go deleted file mode 100644 index b0a9bd43d81..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/clientset.go +++ /dev/null @@ -1,90 +0,0 @@ -/* 
-Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" - wardleinternalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - Wardle() wardleinternalversion.WardleInterface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. -type Clientset struct { - *discovery.DiscoveryClient - wardle *wardleinternalversion.WardleClient -} - -// Wardle retrieves the WardleClient -func (c *Clientset) Wardle() wardleinternalversion.WardleInterface { - return c.wardle -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. 
-func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.wardle, err = wardleinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.wardle = wardleinternalversion.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.wardle = wardleinternalversion.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/doc.go deleted file mode 100644 index 6c970b092aa..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package internalversion diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/BUILD deleted file mode 100644 index f9de0c31c64..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/BUILD +++ /dev/null @@ -1,42 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset_generated.go", - "doc.go", - "register.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake", - importpath = "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", - "//staging/src/k8s.io/client-go/discovery:go_default_library", - "//staging/src/k8s.io/client-go/discovery/fake:go_default_library", - "//staging/src/k8s.io/client-go/testing:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion:go_default_library", - 
"//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go deleted file mode 100644 index 79c8eeb4612..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/discovery" - fakediscovery "k8s.io/client-go/discovery/fake" - "k8s.io/client-go/testing" - clientset "k8s.io/sample-apiserver/pkg/client/clientset/internalversion" - wardleinternalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion" - fakewardleinternalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake" -) - -// NewSimpleClientset returns a clientset that will respond with the provided objects. -// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement -// for a real clientset and is mostly useful in simple unit tests. -func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - cs := &Clientset{} - cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} - cs.AddReactor("*", "*", testing.ObjectReaction(o)) - cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) - if err != nil { - return false, nil, err - } - return true, watch, nil - }) - - return cs -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. 
-type Clientset struct { - testing.Fake - discovery *fakediscovery.FakeDiscovery -} - -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.discovery -} - -var _ clientset.Interface = &Clientset{} - -// Wardle retrieves the WardleClient -func (c *Clientset) Wardle() wardleinternalversion.WardleInterface { - return &fakewardleinternalversion.FakeWardle{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/doc.go deleted file mode 100644 index 9b99e716709..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated fake clientset. -package fake diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/register.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/register.go deleted file mode 100644 index f617753ac3e..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - wardleinternalversion "k8s.io/sample-apiserver/pkg/apis/wardle" -) - -var scheme = runtime.NewScheme() -var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - wardleinternalversion.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. 
-var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(scheme)) -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/BUILD deleted file mode 100644 index fb08c925277..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme", - importpath = "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/install:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/doc.go deleted file mode 100644 index 7dc3756168f..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes 
Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go deleted file mode 100644 index 7a519381928..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package scheme - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - wardle "k8s.io/sample-apiserver/pkg/apis/wardle/install" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - Install(Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(scheme *runtime.Scheme) { - wardle.Install(scheme) -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/BUILD deleted file mode 100644 index f07c432daef..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fischer.go", - "flunder.go", - "generated_expansion.go", - "wardle_client.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion", - importpath = "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", - "//staging/src/k8s.io/client-go/rest:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle:go_default_library", - 
"//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/doc.go deleted file mode 100644 index 86602442bab..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/BUILD deleted file mode 100644 index feda5e743be..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/BUILD +++ /dev/null @@ -1,39 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_fischer.go", - "fake_flunder.go", - "fake_wardle_client.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake", - importpath = "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", - "//staging/src/k8s.io/client-go/rest:go_default_library", - "//staging/src/k8s.io/client-go/testing:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git 
a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/doc.go deleted file mode 100644 index 16f44399065..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_fischer.go deleted file mode 100644 index 77eb5f27355..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_fischer.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - wardle "k8s.io/sample-apiserver/pkg/apis/wardle" -) - -// FakeFischers implements FischerInterface -type FakeFischers struct { - Fake *FakeWardle -} - -var fischersResource = schema.GroupVersionResource{Group: "wardle.k8s.io", Version: "", Resource: "fischers"} - -var fischersKind = schema.GroupVersionKind{Group: "wardle.k8s.io", Version: "", Kind: "Fischer"} - -// Get takes name of the fischer, and returns the corresponding fischer object, and an error if there is any. -func (c *FakeFischers) Get(name string, options v1.GetOptions) (result *wardle.Fischer, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(fischersResource, name), &wardle.Fischer{}) - if obj == nil { - return nil, err - } - return obj.(*wardle.Fischer), err -} - -// List takes label and field selectors, and returns the list of Fischers that match those selectors. -func (c *FakeFischers) List(opts v1.ListOptions) (result *wardle.FischerList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(fischersResource, fischersKind, opts), &wardle.FischerList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &wardle.FischerList{ListMeta: obj.(*wardle.FischerList).ListMeta} - for _, item := range obj.(*wardle.FischerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested fischers. -func (c *FakeFischers) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(fischersResource, opts)) -} - -// Create takes the representation of a fischer and creates it. Returns the server's representation of the fischer, and an error, if there is any. -func (c *FakeFischers) Create(fischer *wardle.Fischer) (result *wardle.Fischer, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(fischersResource, fischer), &wardle.Fischer{}) - if obj == nil { - return nil, err - } - return obj.(*wardle.Fischer), err -} - -// Update takes the representation of a fischer and updates it. Returns the server's representation of the fischer, and an error, if there is any. -func (c *FakeFischers) Update(fischer *wardle.Fischer) (result *wardle.Fischer, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(fischersResource, fischer), &wardle.Fischer{}) - if obj == nil { - return nil, err - } - return obj.(*wardle.Fischer), err -} - -// Delete takes name of the fischer and deletes it. Returns an error if one occurs. -func (c *FakeFischers) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(fischersResource, name), &wardle.Fischer{}) - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeFischers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(fischersResource, listOptions) - - _, err := c.Fake.Invokes(action, &wardle.FischerList{}) - return err -} - -// Patch applies the patch and returns the patched fischer. -func (c *FakeFischers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *wardle.Fischer, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(fischersResource, name, pt, data, subresources...), &wardle.Fischer{}) - if obj == nil { - return nil, err - } - return obj.(*wardle.Fischer), err -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_flunder.go deleted file mode 100644 index 2c893d3ff2d..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_flunder.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - wardle "k8s.io/sample-apiserver/pkg/apis/wardle" -) - -// FakeFlunders implements FlunderInterface -type FakeFlunders struct { - Fake *FakeWardle - ns string -} - -var flundersResource = schema.GroupVersionResource{Group: "wardle.k8s.io", Version: "", Resource: "flunders"} - -var flundersKind = schema.GroupVersionKind{Group: "wardle.k8s.io", Version: "", Kind: "Flunder"} - -// Get takes name of the flunder, and returns the corresponding flunder object, and an error if there is any. -func (c *FakeFlunders) Get(name string, options v1.GetOptions) (result *wardle.Flunder, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(flundersResource, c.ns, name), &wardle.Flunder{}) - - if obj == nil { - return nil, err - } - return obj.(*wardle.Flunder), err -} - -// List takes label and field selectors, and returns the list of Flunders that match those selectors. -func (c *FakeFlunders) List(opts v1.ListOptions) (result *wardle.FlunderList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(flundersResource, flundersKind, c.ns, opts), &wardle.FlunderList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &wardle.FlunderList{ListMeta: obj.(*wardle.FlunderList).ListMeta} - for _, item := range obj.(*wardle.FlunderList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested flunders. -func (c *FakeFlunders) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(testing.NewWatchAction(flundersResource, c.ns, opts)) - -} - -// Create takes the representation of a flunder and creates it. Returns the server's representation of the flunder, and an error, if there is any. -func (c *FakeFlunders) Create(flunder *wardle.Flunder) (result *wardle.Flunder, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(flundersResource, c.ns, flunder), &wardle.Flunder{}) - - if obj == nil { - return nil, err - } - return obj.(*wardle.Flunder), err -} - -// Update takes the representation of a flunder and updates it. Returns the server's representation of the flunder, and an error, if there is any. -func (c *FakeFlunders) Update(flunder *wardle.Flunder) (result *wardle.Flunder, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(flundersResource, c.ns, flunder), &wardle.Flunder{}) - - if obj == nil { - return nil, err - } - return obj.(*wardle.Flunder), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlunders) UpdateStatus(flunder *wardle.Flunder) (*wardle.Flunder, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(flundersResource, "status", c.ns, flunder), &wardle.Flunder{}) - - if obj == nil { - return nil, err - } - return obj.(*wardle.Flunder), err -} - -// Delete takes name of the flunder and deletes it. Returns an error if one occurs. -func (c *FakeFlunders) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(flundersResource, c.ns, name), &wardle.Flunder{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeFlunders) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(flundersResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &wardle.FlunderList{}) - return err -} - -// Patch applies the patch and returns the patched flunder. -func (c *FakeFlunders) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *wardle.Flunder, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(flundersResource, c.ns, name, pt, data, subresources...), &wardle.Flunder{}) - - if obj == nil { - return nil, err - } - return obj.(*wardle.Flunder), err -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_wardle_client.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_wardle_client.go deleted file mode 100644 index b9282c03d34..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_wardle_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" - internalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion" -) - -type FakeWardle struct { - *testing.Fake -} - -func (c *FakeWardle) Fischers() internalversion.FischerInterface { - return &FakeFischers{c} -} - -func (c *FakeWardle) Flunders(namespace string) internalversion.FlunderInterface { - return &FakeFlunders{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeWardle) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fischer.go deleted file mode 100644 index f5d88411fd4..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fischer.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - wardle "k8s.io/sample-apiserver/pkg/apis/wardle" - scheme "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme" -) - -// FischersGetter has a method to return a FischerInterface. -// A group's client should implement this interface. -type FischersGetter interface { - Fischers() FischerInterface -} - -// FischerInterface has methods to work with Fischer resources. -type FischerInterface interface { - Create(*wardle.Fischer) (*wardle.Fischer, error) - Update(*wardle.Fischer) (*wardle.Fischer, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*wardle.Fischer, error) - List(opts v1.ListOptions) (*wardle.FischerList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *wardle.Fischer, err error) - FischerExpansion -} - -// fischers implements FischerInterface -type fischers struct { - client rest.Interface -} - -// newFischers returns a Fischers -func newFischers(c *WardleClient) *fischers { - return &fischers{ - client: c.RESTClient(), - } -} - -// Get takes name of the fischer, and returns the corresponding fischer object, and an error if there is any. -func (c *fischers) Get(name string, options v1.GetOptions) (result *wardle.Fischer, err error) { - result = &wardle.Fischer{} - err = c.client.Get(). - Resource("fischers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Fischers that match those selectors. 
-func (c *fischers) List(opts v1.ListOptions) (result *wardle.FischerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &wardle.FischerList{} - err = c.client.Get(). - Resource("fischers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested fischers. -func (c *fischers) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("fischers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a fischer and creates it. Returns the server's representation of the fischer, and an error, if there is any. -func (c *fischers) Create(fischer *wardle.Fischer) (result *wardle.Fischer, err error) { - result = &wardle.Fischer{} - err = c.client.Post(). - Resource("fischers"). - Body(fischer). - Do(). - Into(result) - return -} - -// Update takes the representation of a fischer and updates it. Returns the server's representation of the fischer, and an error, if there is any. -func (c *fischers) Update(fischer *wardle.Fischer) (result *wardle.Fischer, err error) { - result = &wardle.Fischer{} - err = c.client.Put(). - Resource("fischers"). - Name(fischer.Name). - Body(fischer). - Do(). - Into(result) - return -} - -// Delete takes name of the fischer and deletes it. Returns an error if one occurs. -func (c *fischers) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("fischers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *fischers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("fischers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched fischer. -func (c *fischers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *wardle.Fischer, err error) { - result = &wardle.Fischer{} - err = c.client.Patch(pt). - Resource("fischers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/flunder.go deleted file mode 100644 index 7d72dbf993a..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/flunder.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - wardle "k8s.io/sample-apiserver/pkg/apis/wardle" - scheme "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme" -) - -// FlundersGetter has a method to return a FlunderInterface. -// A group's client should implement this interface. -type FlundersGetter interface { - Flunders(namespace string) FlunderInterface -} - -// FlunderInterface has methods to work with Flunder resources. -type FlunderInterface interface { - Create(*wardle.Flunder) (*wardle.Flunder, error) - Update(*wardle.Flunder) (*wardle.Flunder, error) - UpdateStatus(*wardle.Flunder) (*wardle.Flunder, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*wardle.Flunder, error) - List(opts v1.ListOptions) (*wardle.FlunderList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *wardle.Flunder, err error) - FlunderExpansion -} - -// flunders implements FlunderInterface -type flunders struct { - client rest.Interface - ns string -} - -// newFlunders returns a Flunders -func newFlunders(c *WardleClient, namespace string) *flunders { - return &flunders{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the flunder, and returns the corresponding flunder object, and an error if there is any. -func (c *flunders) Get(name string, options v1.GetOptions) (result *wardle.Flunder, err error) { - result = &wardle.Flunder{} - err = c.client.Get(). - Namespace(c.ns). - Resource("flunders"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of Flunders that match those selectors. -func (c *flunders) List(opts v1.ListOptions) (result *wardle.FlunderList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &wardle.FlunderList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("flunders"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flunders. -func (c *flunders) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("flunders"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a flunder and creates it. Returns the server's representation of the flunder, and an error, if there is any. -func (c *flunders) Create(flunder *wardle.Flunder) (result *wardle.Flunder, err error) { - result = &wardle.Flunder{} - err = c.client.Post(). - Namespace(c.ns). - Resource("flunders"). - Body(flunder). - Do(). - Into(result) - return -} - -// Update takes the representation of a flunder and updates it. Returns the server's representation of the flunder, and an error, if there is any. -func (c *flunders) Update(flunder *wardle.Flunder) (result *wardle.Flunder, err error) { - result = &wardle.Flunder{} - err = c.client.Put(). - Namespace(c.ns). - Resource("flunders"). - Name(flunder.Name). - Body(flunder). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *flunders) UpdateStatus(flunder *wardle.Flunder) (result *wardle.Flunder, err error) { - result = &wardle.Flunder{} - err = c.client.Put(). - Namespace(c.ns). - Resource("flunders"). - Name(flunder.Name). - SubResource("status"). - Body(flunder). - Do(). - Into(result) - return -} - -// Delete takes name of the flunder and deletes it. Returns an error if one occurs. -func (c *flunders) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("flunders"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flunders) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("flunders"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched flunder. -func (c *flunders) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *wardle.Flunder, err error) { - result = &wardle.Flunder{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("flunders"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/wardle_client.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/wardle_client.go deleted file mode 100644 index 12e618e9699..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/wardle_client.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme" -) - -type WardleInterface interface { - RESTClient() rest.Interface - FischersGetter - FlundersGetter -} - -// WardleClient is used to interact with features provided by the wardle.k8s.io group. -type WardleClient struct { - restClient rest.Interface -} - -func (c *WardleClient) Fischers() FischerInterface { - return newFischers(c) -} - -func (c *WardleClient) Flunders(namespace string) FlunderInterface { - return newFlunders(c, namespace) -} - -// NewForConfig creates a new WardleClient for the given config. -func NewForConfig(c *rest.Config) (*WardleClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &WardleClient{client}, nil -} - -// NewForConfigOrDie creates a new WardleClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *WardleClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new WardleClient for the given RESTClient. 
-func New(c rest.Interface) *WardleClient { - return &WardleClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("wardle.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("wardle.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *WardleClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/BUILD deleted file mode 100644 index c626b200205..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "factory.go", - "generic.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/sample-apiserver/pkg/client/informers/internalversion", - importpath = "k8s.io/sample-apiserver/pkg/client/informers/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle:go_default_library", - 
"//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces:all-srcs", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go deleted file mode 100644 index 8cb755ece29..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package internalversion - -import ( - reflect "reflect" - sync "sync" - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" - internalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion" - internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces" - wardle "k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle" -) - -// SharedInformerOption defines the functional option type for SharedInformerFactory. -type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory - -type sharedInformerFactory struct { - client internalversion.Interface - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc - lock sync.Mutex - defaultResync time.Duration - customResync map[reflect.Type]time.Duration - - informers map[reflect.Type]cache.SharedIndexInformer - // startedInformers is used for tracking which informers have been started. - // This allows Start() to be called multiple times safely. - startedInformers map[reflect.Type]bool -} - -// WithCustomResyncConfig sets a custom resync period for the specified informer types. -func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - for k, v := range resyncConfig { - factory.customResync[reflect.TypeOf(k)] = v - } - return factory - } -} - -// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. -func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.tweakListOptions = tweakListOptions - return factory - } -} - -// WithNamespace limits the SharedInformerFactory to the specified namespace. 
-func WithNamespace(namespace string) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.namespace = namespace - return factory - } -} - -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. -func NewSharedInformerFactory(client internalversion.Interface, defaultResync time.Duration) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync) -} - -// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. -// Listers obtained via this SharedInformerFactory will be subject to the same filters -// as specified here. -// Deprecated: Please use NewSharedInformerFactoryWithOptions instead -func NewFilteredSharedInformerFactory(client internalversion.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) -} - -// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. -func NewSharedInformerFactoryWithOptions(client internalversion.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { - factory := &sharedInformerFactory{ - client: client, - namespace: v1.NamespaceAll, - defaultResync: defaultResync, - informers: make(map[reflect.Type]cache.SharedIndexInformer), - startedInformers: make(map[reflect.Type]bool), - customResync: make(map[reflect.Type]time.Duration), - } - - // Apply all options - for _, opt := range options { - factory = opt(factory) - } - - return factory -} - -// Start initializes all requested informers. 
-func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { - f.lock.Lock() - defer f.lock.Unlock() - - for informerType, informer := range f.informers { - if !f.startedInformers[informerType] { - go informer.Run(stopCh) - f.startedInformers[informerType] = true - } - } -} - -// WaitForCacheSync waits for all started informers' cache were synced. -func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - informers := func() map[reflect.Type]cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informers := map[reflect.Type]cache.SharedIndexInformer{} - for informerType, informer := range f.informers { - if f.startedInformers[informerType] { - informers[informerType] = informer - } - } - return informers - }() - - res := map[reflect.Type]bool{} - for informType, informer := range informers { - res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) - } - return res -} - -// InternalInformerFor returns the SharedIndexInformer for obj using an internal -// client. -func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(obj) - informer, exists := f.informers[informerType] - if exists { - return informer - } - - resyncPeriod, exists := f.customResync[informerType] - if !exists { - resyncPeriod = f.defaultResync - } - - informer = newFunc(f.client, resyncPeriod) - f.informers[informerType] = informer - - return informer -} - -// SharedInformerFactory provides shared informers for resources in all known -// API group versions. 
-type SharedInformerFactory interface { - internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - - Wardle() wardle.Interface -} - -func (f *sharedInformerFactory) Wardle() wardle.Interface { - return wardle.New(f, f.namespace, f.tweakListOptions) -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go deleted file mode 100644 index a0291d38a8a..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package internalversion - -import ( - "fmt" - - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" - wardle "k8s.io/sample-apiserver/pkg/apis/wardle" -) - -// GenericInformer is type of SharedIndexInformer which will locate and delegate to other -// sharedInformers based on type -type GenericInformer interface { - Informer() cache.SharedIndexInformer - Lister() cache.GenericLister -} - -type genericInformer struct { - informer cache.SharedIndexInformer - resource schema.GroupResource -} - -// Informer returns the SharedIndexInformer. 
-func (f *genericInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -// Lister returns the GenericLister. -func (f *genericInformer) Lister() cache.GenericLister { - return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) -} - -// ForResource gives generic access to a shared informer of the matching type -// TODO extend this to unknown resources with a client pool -func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { - switch resource { - // Group=wardle.k8s.io, Version=internalVersion - case wardle.SchemeGroupVersion.WithResource("fischers"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Wardle().InternalVersion().Fischers().Informer()}, nil - case wardle.SchemeGroupVersion.WithResource("flunders"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Wardle().InternalVersion().Flunders().Informer()}, nil - - } - - return nil, fmt.Errorf("no informer found for %v", resource) -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/BUILD deleted file mode 100644 index 02e8076f85f..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["factory_interfaces.go"], - importmap = "k8s.io/kubernetes/vendor/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces", - importpath = "k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - 
"//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go deleted file mode 100644 index abd5bbe58bb..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package internalinterfaces - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - cache "k8s.io/client-go/tools/cache" - internalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion" -) - -// NewInformerFunc takes internalversion.Interface and time.Duration to return a SharedIndexInformer. 
-type NewInformerFunc func(internalversion.Interface, time.Duration) cache.SharedIndexInformer - -// SharedInformerFactory a small interface to allow for adding an informer without an import cycle -type SharedInformerFactory interface { - Start(stopCh <-chan struct{}) - InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer -} - -// TweakListOptionsFunc is a function that transforms a v1.ListOptions. -type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/BUILD deleted file mode 100644 index 90d6566cb50..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "k8s.io/kubernetes/vendor/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle", - importpath = "k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/interface.go 
b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/interface.go deleted file mode 100644 index bccd11a2a57..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package wardle - -import ( - internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces" - internalversion "k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // InternalVersion provides access to shared informers for resources in InternalVersion. - InternalVersion() internalversion.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// InternalVersion returns a new internalversion.Interface. 
-func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/BUILD deleted file mode 100644 index f75fdcef59c..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/BUILD +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "fischer.go", - "flunder.go", - "interface.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion", - importpath = "k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git 
a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go deleted file mode 100644 index ede44539ac6..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package internalversion - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - wardle "k8s.io/sample-apiserver/pkg/apis/wardle" - clientsetinternalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion" - internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces" - internalversion "k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion" -) - -// FischerInformer provides access to a shared informer and lister for -// Fischers. 
-type FischerInformer interface { - Informer() cache.SharedIndexInformer - Lister() internalversion.FischerLister -} - -type fischerInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewFischerInformer constructs a new informer for Fischer type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFischerInformer(client clientsetinternalversion.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredFischerInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredFischerInformer constructs a new informer for Fischer type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredFischerInformer(client clientsetinternalversion.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.Wardle().Fischers().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.Wardle().Fischers().Watch(options) - }, - }, - &wardle.Fischer{}, - resyncPeriod, - indexers, - ) -} - -func (f *fischerInformer) defaultInformer(client clientsetinternalversion.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredFischerInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) 
-} - -func (f *fischerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&wardle.Fischer{}, f.defaultInformer) -} - -func (f *fischerInformer) Lister() internalversion.FischerLister { - return internalversion.NewFischerLister(f.Informer().GetIndexer()) -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go deleted file mode 100644 index 2fdd3ef0f91..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package internalversion - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - wardle "k8s.io/sample-apiserver/pkg/apis/wardle" - clientsetinternalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion" - internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces" - internalversion "k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion" -) - -// FlunderInformer provides access to a shared informer and lister for -// Flunders. 
-type FlunderInformer interface { - Informer() cache.SharedIndexInformer - Lister() internalversion.FlunderLister -} - -type flunderInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewFlunderInformer constructs a new informer for Flunder type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFlunderInformer(client clientsetinternalversion.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredFlunderInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredFlunderInformer constructs a new informer for Flunder type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredFlunderInformer(client clientsetinternalversion.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.Wardle().Flunders(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.Wardle().Flunders(namespace).Watch(options) - }, - }, - &wardle.Flunder{}, - resyncPeriod, - indexers, - ) -} - -func (f *flunderInformer) defaultInformer(client clientsetinternalversion.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredFlunderInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *flunderInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&wardle.Flunder{}, f.defaultInformer) -} - -func (f *flunderInformer) Lister() internalversion.FlunderLister { - return internalversion.NewFlunderLister(f.Informer().GetIndexer()) -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/interface.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/interface.go deleted file mode 100644 index b3b98a338e8..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/interface.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package internalversion - -import ( - internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // Fischers returns a FischerInformer. - Fischers() FischerInformer - // Flunders returns a FlunderInformer. - Flunders() FlunderInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// Fischers returns a FischerInformer. -func (v *version) Fischers() FischerInformer { - return &fischerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - -// Flunders returns a FlunderInformer. 
-func (v *version) Flunders() FlunderInformer { - return &flunderInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/BUILD deleted file mode 100644 index 9d89f3a907f..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "fischer.go", - "flunder.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion", - importpath = "k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/expansion_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/expansion_generated.go deleted file mode 100644 index 89892308b42..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/expansion_generated.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package internalversion - -// FischerListerExpansion allows custom methods to be added to -// FischerLister. -type FischerListerExpansion interface{} - -// FlunderListerExpansion allows custom methods to be added to -// FlunderLister. -type FlunderListerExpansion interface{} - -// FlunderNamespaceListerExpansion allows custom methods to be added to -// FlunderNamespaceLister. -type FlunderNamespaceListerExpansion interface{} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/fischer.go deleted file mode 100644 index 63ec3b50698..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/fischer.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package internalversion - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - wardle "k8s.io/sample-apiserver/pkg/apis/wardle" -) - -// FischerLister helps list Fischers. -type FischerLister interface { - // List lists all Fischers in the indexer. - List(selector labels.Selector) (ret []*wardle.Fischer, err error) - // Get retrieves the Fischer from the index for a given name. - Get(name string) (*wardle.Fischer, error) - FischerListerExpansion -} - -// fischerLister implements the FischerLister interface. -type fischerLister struct { - indexer cache.Indexer -} - -// NewFischerLister returns a new FischerLister. -func NewFischerLister(indexer cache.Indexer) FischerLister { - return &fischerLister{indexer: indexer} -} - -// List lists all Fischers in the indexer. -func (s *fischerLister) List(selector labels.Selector) (ret []*wardle.Fischer, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*wardle.Fischer)) - }) - return ret, err -} - -// Get retrieves the Fischer from the index for a given name. -func (s *fischerLister) Get(name string) (*wardle.Fischer, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(wardle.Resource("fischer"), name) - } - return obj.(*wardle.Fischer), nil -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/flunder.go deleted file mode 100644 index f15119845ee..00000000000 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/flunder.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package internalversion - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - wardle "k8s.io/sample-apiserver/pkg/apis/wardle" -) - -// FlunderLister helps list Flunders. -type FlunderLister interface { - // List lists all Flunders in the indexer. - List(selector labels.Selector) (ret []*wardle.Flunder, err error) - // Flunders returns an object that can list and get Flunders. - Flunders(namespace string) FlunderNamespaceLister - FlunderListerExpansion -} - -// flunderLister implements the FlunderLister interface. -type flunderLister struct { - indexer cache.Indexer -} - -// NewFlunderLister returns a new FlunderLister. -func NewFlunderLister(indexer cache.Indexer) FlunderLister { - return &flunderLister{indexer: indexer} -} - -// List lists all Flunders in the indexer. -func (s *flunderLister) List(selector labels.Selector) (ret []*wardle.Flunder, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*wardle.Flunder)) - }) - return ret, err -} - -// Flunders returns an object that can list and get Flunders. -func (s *flunderLister) Flunders(namespace string) FlunderNamespaceLister { - return flunderNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// FlunderNamespaceLister helps list and get Flunders. 
-type FlunderNamespaceLister interface { - // List lists all Flunders in the indexer for a given namespace. - List(selector labels.Selector) (ret []*wardle.Flunder, err error) - // Get retrieves the Flunder from the indexer for a given namespace and name. - Get(name string) (*wardle.Flunder, error) - FlunderNamespaceListerExpansion -} - -// flunderNamespaceLister implements the FlunderNamespaceLister -// interface. -type flunderNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Flunders in the indexer for a given namespace. -func (s flunderNamespaceLister) List(selector labels.Selector) (ret []*wardle.Flunder, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*wardle.Flunder)) - }) - return ret, err -} - -// Get retrieves the Flunder from the indexer for a given namespace and name. -func (s flunderNamespaceLister) Get(name string) (*wardle.Flunder, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(wardle.Resource("flunder"), name) - } - return obj.(*wardle.Flunder), nil -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD index e51546ee850..db6621acce8 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD @@ -19,8 +19,8 @@ go_library( "//staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer:go_default_library", "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library", "//staging/src/k8s.io/sample-apiserver/pkg/apiserver:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion:go_default_library", - "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion:go_default_library", + 
"//staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned:go_default_library", + "//staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", ], ) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index c9fa45850f5..e492925353e 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -31,8 +31,8 @@ import ( "k8s.io/sample-apiserver/pkg/admission/wardleinitializer" "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" "k8s.io/sample-apiserver/pkg/apiserver" - clientset "k8s.io/sample-apiserver/pkg/client/clientset/internalversion" - informers "k8s.io/sample-apiserver/pkg/client/informers/internalversion" + clientset "k8s.io/sample-apiserver/pkg/client/clientset/versioned" + informers "k8s.io/sample-apiserver/pkg/client/informers/externalversions" ) const defaultEtcdPathPrefix = "/registry/wardle.kubernetes.io" diff --git a/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json b/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json index f7550bb7722..d913de81a6a 100644 --- a/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json @@ -76,7 +76,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/modern-go/concurrent", diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index aa95b82db0b..4076ccb0649 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -88,7 +88,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": 
"f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/modern-go/concurrent", diff --git a/staging/src/k8s.io/sample-controller/controller.go b/staging/src/k8s.io/sample-controller/controller.go index e9d1d8389ee..bea78e72e7e 100644 --- a/staging/src/k8s.io/sample-controller/controller.go +++ b/staging/src/k8s.io/sample-controller/controller.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" appsinformers "k8s.io/client-go/informers/apps/v1" @@ -150,7 +149,7 @@ func NewController( // is closed, at which point it will shutdown the workqueue and wait for // workers to finish processing their current work items. func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { - defer runtime.HandleCrash() + defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() // Start the informer factories to begin populating the informer caches @@ -213,7 +212,7 @@ func (c *Controller) processNextWorkItem() bool { // Forget here else we'd go into a loop of attempting to // process a work item that is invalid. 
c.workqueue.Forget(obj) - runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) return nil } // Run the syncHandler, passing it the namespace/name string of the @@ -231,7 +230,7 @@ func (c *Controller) processNextWorkItem() bool { }(obj) if err != nil { - runtime.HandleError(err) + utilruntime.HandleError(err) return true } @@ -245,7 +244,7 @@ func (c *Controller) syncHandler(key string) error { // Convert the namespace/name string into a distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) + utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return nil } @@ -255,7 +254,7 @@ func (c *Controller) syncHandler(key string) error { // The Foo resource may no longer exist, in which case we stop // processing. if errors.IsNotFound(err) { - runtime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key)) + utilruntime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key)) return nil } @@ -267,7 +266,7 @@ func (c *Controller) syncHandler(key string) error { // We choose to absorb the error here as the worker would requeue the // resource otherwise. Instead, the next time the resource is updated // the resource will be queued again. 
- runtime.HandleError(fmt.Errorf("%s: deployment name must be specified", key)) + utilruntime.HandleError(fmt.Errorf("%s: deployment name must be specified", key)) return nil } @@ -340,7 +339,7 @@ func (c *Controller) enqueueFoo(obj interface{}) { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { - runtime.HandleError(err) + utilruntime.HandleError(err) return } c.workqueue.AddRateLimited(key) @@ -357,12 +356,12 @@ func (c *Controller) handleObject(obj interface{}) { if object, ok = obj.(metav1.Object); !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - runtime.HandleError(fmt.Errorf("error decoding object, invalid type")) + utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type")) return } object, ok = tombstone.Obj.(metav1.Object) if !ok { - runtime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type")) + utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type")) return } klog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName()) diff --git a/test/OWNERS b/test/OWNERS index ea5cffadabb..832701e5309 100644 --- a/test/OWNERS +++ b/test/OWNERS @@ -27,6 +27,7 @@ reviewers: - zmerlynn - vishh - MaciekPytel # for test/e2e/common/autoscaling_utils.go + - oomichi approvers: - bowei # for test/e2e/{dns*,network}.go - cblecker diff --git a/test/cmd/diff.sh b/test/cmd/diff.sh index 6d0d5a12752..1441f7c4f6e 100755 --- a/test/cmd/diff.sh +++ b/test/cmd/diff.sh @@ -40,3 +40,20 @@ run_kubectl_diff_tests() { set +o nounset set +o errexit } + +run_kubectl_diff_same_names() { + set -o nounset + set -o errexit + + create_and_use_new_namespace + kube::log::status "Test kubectl diff with multiple resources with the same name" + + output_message=$(KUBECTL_EXTERNAL_DIFF=find kubectl diff -Rf hack/testdata/diff/) + kube::test::if_has_string "${output_message}" 'v1\.Pod\..*\.test' + kube::test::if_has_string "${output_message}" 
'apps\.v1\.Deployment\..*\.test' + kube::test::if_has_string "${output_message}" 'v1\.ConfigMap\..*\.test' + kube::test::if_has_string "${output_message}" 'v1\.Secret\..*\.test' + + set +o nounset + set +o errexit +} diff --git a/test/cmd/legacy-script.sh b/test/cmd/legacy-script.sh index 49c849e8b37..c838ee4d928 100755 --- a/test/cmd/legacy-script.sh +++ b/test/cmd/legacy-script.sh @@ -473,6 +473,7 @@ runTests() { # Kubectl diff # ################ record_command run_kubectl_diff_tests + record_command run_kubectl_diff_same_names ############### # Kubectl get # diff --git a/test/conformance/testdata/OWNERS b/test/conformance/testdata/OWNERS index f948d2de612..3dfb0c82ba4 100644 --- a/test/conformance/testdata/OWNERS +++ b/test/conformance/testdata/OWNERS @@ -4,6 +4,9 @@ options: reviewers: - bgrant0607 - smarterclayton + - spiffxp + - timothysc + - dims approvers: - bgrant0607 - smarterclayton diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index e3d964ddd3a..8c515047b14 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -179,7 +179,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) { // kubectl create -f deploy.yaml deploymentName := "sample-apiserver-deployment" - etcdImage := "quay.io/coreos/etcd:v3.2.24" + etcdImage := "quay.io/coreos/etcd:v3.3.10" podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"} replicas := int32(1) zero := int64(0) diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index 3ac8ab15068..75b06a53607 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -36,7 +36,7 @@ go_library( "//pkg/controller/replicaset:go_default_library", "//pkg/controller/replication:go_default_library", "//pkg/master/ports:go_default_library", - "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", 
"//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1beta1:go_default_library", diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index ec2b10a146f..d63f046db22 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -250,8 +250,10 @@ var _ = SIGDescribe("CronJob", func() { Expect(len(finishedJobs) == 1).To(BeTrue()) // Job should get deleted when the next job finishes the next minute - By("Ensuring this job does not exist anymore") - err = waitForJobNotExist(f.ClientSet, f.Namespace.Name, finishedJobs[0]) + By("Ensuring this job and its pods does not exist anymore") + err = waitForJobToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0]) + Expect(err).NotTo(HaveOccurred()) + err = waitForJobsPodToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0]) Expect(err).NotTo(HaveOccurred()) By("Ensuring there is 1 finished job by listing jobs explicitly") @@ -380,8 +382,8 @@ func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string) }) } -// Wait for a job to not exist by listing jobs explicitly. -func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job) error { +// Wait for a job to disappear by listing them explicitly. +func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{}) if err != nil { @@ -397,6 +399,18 @@ func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job }) } +// Wait for a pod to disappear by listing them explicitly. 
+func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error { + return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { + options := metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", targetJob.UID)} + pods, err := c.CoreV1().Pods(ns).List(options) + if err != nil { + return false, err + } + return len(pods.Items) == 0, nil + }) +} + // Wait for a job to be replaced with a new one. func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index acd62243136..ee4176adc13 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -35,7 +35,7 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/controller/daemon" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" @@ -639,7 +639,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st // canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool { newPod := daemon.NewPod(ds, node.Name) - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(&node) fit, _, err := daemon.Predicates(newPod, nodeInfo) if err != nil { diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 8c127291f64..24b233c1ee5 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -264,7 +264,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { Containers: []v1.Container{ { Name: name, - Image: NginxImageName, + Image: NginxImage, }, }, }, @@ -272,7 +272,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { By("When a replication controller with a matching selector is created") replicas := int32(1) - rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName) + rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage) rcSt.Spec.Selector = map[string]string{"name": name} rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt) Expect(err).NotTo(HaveOccurred()) @@ -301,7 +301,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { name := "pod-release" By("Given a ReplicationController is created") replicas := int32(1) - rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName) + rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage) rcSt.Spec.Selector = map[string]string{"name": name} rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index b41ea595132..170ea910fd4 100644 --- a/test/e2e/apps/replica_set.go +++ 
b/test/e2e/apps/replica_set.go @@ -271,7 +271,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { Containers: []v1.Container{ { Name: name, - Image: NginxImageName, + Image: NginxImage, }, }, }, @@ -279,7 +279,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { By("When a replicaset with a matching selector is created") replicas := int32(1) - rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImageName) + rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImage) rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}} rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/apps/types.go b/test/e2e/apps/types.go index ed673b3eb82..0b9d9c70d15 100644 --- a/test/e2e/apps/types.go +++ b/test/e2e/apps/types.go @@ -21,6 +21,7 @@ import ( imageutils "k8s.io/kubernetes/test/utils/image" ) +// NOTE(claudiub): These constants should NOT be used as Pod Container Images. 
const ( NginxImageName = "nginx" RedisImageName = "redis" diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index c84d515a02b..2d2f866997d 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -349,8 +349,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) By("Expect no more scale-up to be happening after all pods are scheduled") - status, err = getScaleUpStatus(c) + + // wait for a while until scale-up finishes; we cannot read CA status immediately + // after pods are scheduled as status config map is updated by CA once every loop iteration + status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool { + return s.status == caNoScaleUpStatus + }, 2*freshStatusLimit) framework.ExpectNoError(err) + if status.target != target { klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target) } @@ -875,6 +881,19 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { clusterSize = manuallyIncreaseClusterSize(f, originalSizes) } + // If new nodes are disconnected too soon, they'll be considered not started + // instead of unready, and cluster won't be considered unhealthy. + // + // More precisely, Cluster Autoscaler compares last transition time of + // several readiness conditions to node create time. If it's within + // 2 minutes, it'll assume node is just starting and not unhealthy. + // + // Nodes become ready in less than 1 minute after being created, + // so waiting extra 2 minutes before breaking them (which triggers + // readiness condition transition) should be sufficient, while + // making no assumptions about minimal node startup time. 
+ time.Sleep(2 * time.Minute) + By("Block network connectivity to some nodes to simulate unhealthy cluster") nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize)))) nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ diff --git a/test/e2e/common/configmap_volume.go b/test/e2e/common/configmap_volume.go index 40cdcd5044c..61c430a3199 100644 --- a/test/e2e/common/configmap_volume.go +++ b/test/e2e/common/configmap_volume.go @@ -18,7 +18,6 @@ package common import ( "fmt" - "os" "path" . "github.com/onsi/ginkgo" @@ -642,17 +641,14 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d if defaultMode != nil { pod.Spec.Volumes[0].VolumeSource.ConfigMap.DefaultMode = defaultMode - } else { - mode := int32(0644) - defaultMode = &mode } - modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode)) + fileModeRegexp := framework.GetFileModeRegex("/etc/configmap-volume/data-1", defaultMode) output := []string{ "content of file \"/etc/configmap-volume/data-1\": value-1", - "mode of file \"/etc/configmap-volume/data-1\": " + modeString, + fileModeRegexp, } - f.TestContainerOutput("consume configMaps", pod, 0, output) + f.TestContainerOutputRegexp("consume configMaps", pod, 0, output) } func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) { @@ -728,9 +724,6 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item if itemMode != nil { pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode - } else { - mode := int32(0644) - itemMode = &mode } // Just check file mode if fsGroup is not set. 
If fsGroup is set, the @@ -739,10 +732,10 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item "content of file \"/etc/configmap-volume/path/to/data-2\": value-2", } if fsGroup == 0 { - modeString := fmt.Sprintf("%v", os.FileMode(*itemMode)) - output = append(output, "mode of file \"/etc/configmap-volume/path/to/data-2\": "+modeString) + fileModeRegexp := framework.GetFileModeRegex("/etc/configmap-volume/path/to/data-2", itemMode) + output = append(output, fileModeRegexp) } - f.TestContainerOutput("consume configMaps", pod, 0, output) + f.TestContainerOutputRegexp("consume configMaps", pod, 0, output) } func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath, podName string) error { diff --git a/test/e2e/common/host_path.go b/test/e2e/common/host_path.go index 2728b6e4c32..fef81301803 100644 --- a/test/e2e/common/host_path.go +++ b/test/e2e/common/host_path.go @@ -49,7 +49,7 @@ var _ = Describe("[sig-storage] HostPath", func() { source := &v1.HostPathVolumeSource{ Path: "/tmp", } - pod := testPodWithHostVol(volumePath, source) + pod := testPodWithHostVol(volumePath, source, false) pod.Spec.Containers[0].Args = []string{ fmt.Sprintf("--fs_type=%v", volumePath), @@ -67,7 +67,7 @@ var _ = Describe("[sig-storage] HostPath", func() { source := &v1.HostPathVolumeSource{ Path: "/tmp", } - pod := testPodWithHostVol(volumePath, source) + pod := testPodWithHostVol(volumePath, source, true) pod.Spec.Containers[0].Args = []string{ fmt.Sprintf("--new_file_0644=%v", filePath), @@ -96,7 +96,7 @@ var _ = Describe("[sig-storage] HostPath", func() { source := &v1.HostPathVolumeSource{ Path: "/tmp", } - pod := testPodWithHostVol(volumePath, source) + pod := testPodWithHostVol(volumePath, source, true) // Write the file in the subPath from container 0 container := &pod.Spec.Containers[0] @@ -135,9 +135,8 @@ func mount(source *v1.HostPathVolumeSource) []v1.Volume { } //TODO: To merge this with the emptyDir tests, we can make source a 
lambda. -func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod { +func testPodWithHostVol(path string, source *v1.HostPathVolumeSource, privileged bool) *v1.Pod { podName := "pod-host-path-test" - privileged := true return &v1.Pod{ TypeMeta: metav1.TypeMeta{ diff --git a/test/e2e/common/projected_configmap.go b/test/e2e/common/projected_configmap.go index 2616189d42b..92a92b837d6 100644 --- a/test/e2e/common/projected_configmap.go +++ b/test/e2e/common/projected_configmap.go @@ -18,7 +18,6 @@ package common import ( "fmt" - "os" "path" "k8s.io/api/core/v1" @@ -574,17 +573,14 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup if defaultMode != nil { //pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].ConfigMap.DefaultMode = defaultMode pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode - } else { - mode := int32(0644) - defaultMode = &mode } - modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode)) + fileModeRegexp := framework.GetFileModeRegex("/etc/projected-configmap-volume/data-1", defaultMode) output := []string{ "content of file \"/etc/projected-configmap-volume/data-1\": value-1", - "mode of file \"/etc/projected-configmap-volume/data-1\": " + modeString, + fileModeRegexp, } - f.TestContainerOutput("consume configMaps", pod, 0, output) + f.TestContainerOutputRegexp("consume configMaps", pod, 0, output) } func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) { @@ -665,9 +661,6 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup in if itemMode != nil { //pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = itemMode - } else { - mode := int32(0644) - itemMode = &mode } // Just check file mode if fsGroup is not set. 
If fsGroup is set, the @@ -676,8 +669,8 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup in "content of file \"/etc/projected-configmap-volume/path/to/data-2\": value-2", } if fsGroup == 0 { - modeString := fmt.Sprintf("%v", os.FileMode(*itemMode)) - output = append(output, "mode of file \"/etc/projected-configmap-volume/path/to/data-2\": "+modeString) + fileModeRegexp := framework.GetFileModeRegex("/etc/projected-configmap-volume/path/to/data-2", itemMode) + output = append(output, fileModeRegexp) } - f.TestContainerOutput("consume configMaps", pod, 0, output) + f.TestContainerOutputRegexp("consume configMaps", pod, 0, output) } diff --git a/test/e2e/common/projected_secret.go b/test/e2e/common/projected_secret.go index 4f88da9f3e0..ba3950a484f 100644 --- a/test/e2e/common/projected_secret.go +++ b/test/e2e/common/projected_secret.go @@ -18,7 +18,6 @@ package common import ( "fmt" - "os" "path" "k8s.io/api/core/v1" @@ -193,9 +192,10 @@ var _ = Describe("[sig-storage] Projected secret", func() { }, } - f.TestContainerOutput("consume secrets", pod, 0, []string{ + fileModeRegexp := framework.GetFileModeRegex("/etc/projected-secret-volume/data-1", nil) + f.TestContainerOutputRegexp("consume secrets", pod, 0, []string{ "content of file \"/etc/projected-secret-volume/data-1\": value-1", - "mode of file \"/etc/projected-secret-volume/data-1\": -rw-r--r--", + fileModeRegexp, }) }) @@ -481,9 +481,6 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int if defaultMode != nil { //pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].Secret.DefaultMode = defaultMode pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode - } else { - mode := int32(0644) - defaultMode = &mode } if fsGroup != nil || uid != nil { @@ -493,13 +490,13 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int } } - modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode)) + fileModeRegexp := 
framework.GetFileModeRegex("/etc/projected-secret-volume/data-1", defaultMode) expectedOutput := []string{ "content of file \"/etc/projected-secret-volume/data-1\": value-1", - "mode of file \"/etc/projected-secret-volume/data-1\": " + modeString, + fileModeRegexp, } - f.TestContainerOutput("consume secrets", pod, 0, expectedOutput) + f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput) } func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { @@ -567,16 +564,13 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { if mode != nil { //pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].Secret.Items[0].Mode = mode pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = mode - } else { - defaultItemMode := int32(0644) - mode = &defaultItemMode } - modeString := fmt.Sprintf("%v", os.FileMode(*mode)) + fileModeRegexp := framework.GetFileModeRegex("/etc/projected-secret-volume/new-path-data-1", mode) expectedOutput := []string{ "content of file \"/etc/projected-secret-volume/new-path-data-1\": value-1", - "mode of file \"/etc/projected-secret-volume/new-path-data-1\": " + modeString, + fileModeRegexp, } - f.TestContainerOutput("consume secrets", pod, 0, expectedOutput) + f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput) } diff --git a/test/e2e/common/runtime.go b/test/e2e/common/runtime.go index 69b7bbf82f2..7fdb3215896 100644 --- a/test/e2e/common/runtime.go +++ b/test/e2e/common/runtime.go @@ -96,11 +96,6 @@ while true; do sleep 1; done Container: testContainer, RestartPolicy: testCase.RestartPolicy, Volumes: testVolumes, - PodSecurityContext: &v1.PodSecurityContext{ - SELinuxOptions: &v1.SELinuxOptions{ - Level: "s0", - }, - }, } terminateContainer.Create() defer terminateContainer.Delete() diff --git a/test/e2e/common/secrets_volume.go b/test/e2e/common/secrets_volume.go index e700c699e80..f51ff813616 100644 --- a/test/e2e/common/secrets_volume.go +++ 
b/test/e2e/common/secrets_volume.go @@ -18,7 +18,6 @@ package common import ( "fmt" - "os" "path" "k8s.io/api/core/v1" @@ -182,9 +181,10 @@ var _ = Describe("[sig-storage] Secrets", func() { }, } - f.TestContainerOutput("consume secrets", pod, 0, []string{ + fileModeRegexp := framework.GetFileModeRegex("/etc/secret-volume/data-1", nil) + f.TestContainerOutputRegexp("consume secrets", pod, 0, []string{ "content of file \"/etc/secret-volume/data-1\": value-1", - "mode of file \"/etc/secret-volume/data-1\": -rw-r--r--", + fileModeRegexp, }) }) @@ -451,9 +451,6 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre if defaultMode != nil { pod.Spec.Volumes[0].VolumeSource.Secret.DefaultMode = defaultMode - } else { - mode := int32(0644) - defaultMode = &mode } if fsGroup != nil || uid != nil { @@ -463,13 +460,13 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre } } - modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode)) + fileModeRegexp := framework.GetFileModeRegex("/etc/secret-volume/data-1", defaultMode) expectedOutput := []string{ "content of file \"/etc/secret-volume/data-1\": value-1", - "mode of file \"/etc/secret-volume/data-1\": " + modeString, + fileModeRegexp, } - f.TestContainerOutput("consume secrets", pod, 0, expectedOutput) + f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput) } func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { @@ -528,18 +525,15 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { if mode != nil { pod.Spec.Volumes[0].VolumeSource.Secret.Items[0].Mode = mode - } else { - defaultItemMode := int32(0644) - mode = &defaultItemMode } - modeString := fmt.Sprintf("%v", os.FileMode(*mode)) + fileModeRegexp := framework.GetFileModeRegex("/etc/secret-volume/new-path-data-1", mode) expectedOutput := []string{ "content of file \"/etc/secret-volume/new-path-data-1\": value-1", - "mode of file \"/etc/secret-volume/new-path-data-1\": 
" + modeString, + fileModeRegexp, } - f.TestContainerOutput("consume secrets", pod, 0, expectedOutput) + f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput) } func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName string) error { diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index d05483d20c4..9b960fe0c82 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -53,7 +53,8 @@ import ( ) var ( - cloudConfig = &framework.TestContext.CloudConfig + cloudConfig = &framework.TestContext.CloudConfig + nodeKillerStopCh = make(chan struct{}) ) // There are certain operations we only want to run once per overall test invocation @@ -136,6 +137,11 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Reference common test to make the import valid. commontest.CurrentSuite = commontest.E2E + if framework.TestContext.NodeKiller.Enabled { + nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider) + nodeKillerStopCh = make(chan struct{}) + go nodeKiller.Run(nodeKillerStopCh) + } return nil }, func(data []byte) { @@ -160,6 +166,9 @@ var _ = ginkgo.SynchronizedAfterSuite(func() { framework.Logf("Error gathering metrics: %v", err) } } + if framework.TestContext.NodeKiller.Enabled { + close(nodeKillerStopCh) + } }) func gatherTestSuiteMetrics() error { diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 6a1be393003..ab4868a01d5 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -65,8 +65,8 @@ go_library( "//pkg/kubelet/util/format:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/metrics:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/security/podsecuritypolicy/seccomp:go_default_library", "//pkg/ssh:go_default_library", "//pkg/util/system:go_default_library", diff --git 
a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go index d0731cbff5d..60e238185f4 100644 --- a/test/e2e/framework/nodes_util.go +++ b/test/e2e/framework/nodes_util.go @@ -22,9 +22,12 @@ import ( "path" "path/filepath" "strings" + "sync" "time" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" ) func EtcdUpgrade(target_storage, target_version string) error { @@ -63,7 +66,7 @@ func etcdUpgradeGCE(target_storage, target_version string) error { os.Environ(), "TEST_ETCD_VERSION="+target_version, "STORAGE_BACKEND="+target_storage, - "TEST_ETCD_IMAGE=3.2.24-1") + "TEST_ETCD_IMAGE=3.3.10-0") _, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M") return err @@ -103,7 +106,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error { env = append(env, "TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion, "STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage, - "TEST_ETCD_IMAGE=3.2.24-1") + "TEST_ETCD_IMAGE=3.3.10-0") } else { // In e2e tests, we skip the confirmation prompt about // implicit etcd upgrades to simulate the user entering "y". @@ -331,3 +334,63 @@ func waitForSSHTunnels() { return err == nil, nil }) } + +// NodeKiller is a utility to simulate node failures. +type NodeKiller struct { + config NodeKillerConfig + client clientset.Interface + provider string +} + +// NewNodeKiller creates new NodeKiller. +func NewNodeKiller(config NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller { + return &NodeKiller{config, client, provider} +} + +// Run starts NodeKiller until stopCh is closed. +func (k *NodeKiller) Run(stopCh <-chan struct{}) { + // wait.JitterUntil starts work immediately, so wait first. 
+ time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor)) + wait.JitterUntil(func() { + nodes := k.pickNodes() + k.kill(nodes) + }, k.config.Interval, k.config.JitterFactor, true, stopCh) +} + +func (k *NodeKiller) pickNodes() []v1.Node { + nodes := GetReadySchedulableNodesOrDie(k.client) + numNodes := int(k.config.FailureRatio * float64(len(nodes.Items))) + shuffledNodes := shuffleNodes(nodes.Items) + if len(shuffledNodes) > numNodes { + return shuffledNodes[:numNodes] + } + return shuffledNodes +} + +func (k *NodeKiller) kill(nodes []v1.Node) { + wg := sync.WaitGroup{} + wg.Add(len(nodes)) + for _, node := range nodes { + node := node + go func() { + defer wg.Done() + + Logf("Stopping docker and kubelet on %q to simulate failure", node.Name) + err := IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node) + if err != nil { + Logf("ERROR while stopping node %q: %v", node.Name, err) + return + } + + time.Sleep(k.config.SimulatedDowntime) + + Logf("Rebooting %q to repair the node", node.Name) + err = IssueSSHCommand("sudo reboot", k.provider, &node) + if err != nil { + Logf("ERROR while rebooting node %q: %v", node.Name, err) + return + } + }() + } + wg.Wait() +} diff --git a/test/e2e/framework/podlogs/podlogs.go b/test/e2e/framework/podlogs/podlogs.go index 77fae293af0..cf24571d963 100644 --- a/test/e2e/framework/podlogs/podlogs.go +++ b/test/e2e/framework/podlogs/podlogs.go @@ -99,9 +99,15 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO } for _, pod := range pods.Items { - for _, c := range pod.Spec.Containers { + for i, c := range pod.Spec.Containers { name := pod.ObjectMeta.Name + "/" + c.Name - if logging[name] { + if logging[name] || + // sanity check, array should have entry for each container + len(pod.Status.ContainerStatuses) <= i || + // Don't attempt to get logs for a container unless it is running or has terminated. 
+ // Trying to get a log would just end up with an error that we would have to suppress. + (pod.Status.ContainerStatuses[i].State.Running == nil && + pod.Status.ContainerStatuses[i].State.Terminated == nil) { continue } readCloser, err := LogsForPod(ctx, cs, ns, pod.ObjectMeta.Name, diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 9a30001760c..394958d0f68 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -109,6 +109,14 @@ type ServiceTestJig struct { Labels map[string]string } +// PodNode is a pod-node pair indicating which node a given pod is running on +type PodNode struct { + // Pod represents pod name + Pod string + // Node represents node name + Node string +} + // NewServiceTestJig allocates and inits a new ServiceTestJig. func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig { j := &ServiceTestJig{} @@ -348,6 +356,25 @@ func PickNodeIP(c clientset.Interface) string { return ip } +// PodNodePairs return PodNode pairs for all pods in a namespace +func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) { + var result []PodNode + + podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + if err != nil { + return result, err + } + + for _, pod := range podList.Items { + result = append(result, PodNode{ + Pod: pod.Name, + Node: pod.Spec.NodeName, + }) + } + + return result, nil +} + // GetEndpointNodes returns a map of nodenames:external-ip on which the // endpoints of the given Service are running. 
func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string { diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index 7ac659465a6..58df02d4371 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -115,6 +115,7 @@ type TestContextType struct { GatherLogsSizes bool GatherMetricsAfterTest string GatherSuiteMetricsAfterTest bool + MaxNodesToGather int AllowGatheringProfiles bool // If set to 'true' framework will gather ClusterAutoscaler metrics when gathering them for other components. IncludeClusterAutoscalerMetrics bool @@ -148,6 +149,26 @@ type TestContextType struct { // The DNS Domain of the cluster. ClusterDNSDomain string + + // The configuration of NodeKiller. + NodeKiller NodeKillerConfig +} + +// NodeKillerConfig describes configuration of NodeKiller -- a utility to +// simulate node failures. +type NodeKillerConfig struct { + // Enabled determines whether NodeKiller should do anything at all. + // All other options below are ignored if Enabled = false. + Enabled bool + // FailureRatio is a percentage of all nodes that could fail simultaneously. + FailureRatio float64 + // Interval is time between node failures. + Interval time.Duration + // JitterFactor is factor used to jitter node failures. + // Node will be killed between [Interval, Interval * (1.0 + JitterFactor)]. + JitterFactor float64 + // SimulatedDowntime is the duration between a node being killed and recreated. + SimulatedDowntime time.Duration } // NodeTestContextType is part of TestContextType, it is shared by all node e2e test. 
@@ -206,6 +227,7 @@ func RegisterCommonFlags() { flag.StringVar(&TestContext.GatherKubeSystemResourceUsageData, "gather-resource-usage", "false", "If set to 'true' or 'all' framework will be monitoring resource usage of system all add-ons in (some) e2e tests, if set to 'master' framework will be monitoring master node only, if set to 'none' of 'false' monitoring will be turned off.") flag.BoolVar(&TestContext.GatherLogsSizes, "gather-logs-sizes", false, "If set to true framework will be monitoring logs sizes on all machines running e2e tests.") + flag.IntVar(&TestContext.MaxNodesToGather, "max-nodes-to-gather-from", 20, "The maximum number of nodes to gather extended info from on test failure.") flag.StringVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", "false", "If set to 'true' framework will gather metrics from all components after each test. If set to 'master' only master component metrics would be gathered.") flag.BoolVar(&TestContext.GatherSuiteMetricsAfterTest, "gather-suite-metrics-at-teardown", false, "If set to true framwork will gather metrics from all components after the whole test suite completes.") flag.BoolVar(&TestContext.AllowGatheringProfiles, "allow-gathering-profiles", true, "If set to true framework will allow to gather CPU/memory allocation pprof profiles from the master.") @@ -281,6 +303,13 @@ func RegisterClusterFlags() { flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.") flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.") flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. 
This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.") + + nodeKiller := &TestContext.NodeKiller + flag.BoolVar(&nodeKiller.Enabled, "node-killer", false, "Whether NodeKiller should kill any nodes.") + flag.Float64Var(&nodeKiller.FailureRatio, "node-killer-failure-ratio", 0.01, "Percentage of nodes to be killed") + flag.DurationVar(&nodeKiller.Interval, "node-killer-interval", 1*time.Minute, "Time between node failures.") + flag.Float64Var(&nodeKiller.JitterFactor, "node-killer-jitter-factor", 60, "Factor used to jitter node failures.") + flag.DurationVar(&nodeKiller.SimulatedDowntime, "node-killer-simulated-downtime", 10*time.Minute, "A delay between node death and recreation") } // Register flags specific to the node e2e test suite. @@ -315,7 +344,8 @@ func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config { config := clientcmdapi.NewConfig() credentials := clientcmdapi.NewAuthInfo() - credentials.TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + credentials.Token = clientCfg.BearerToken + credentials.TokenFile = clientCfg.BearerTokenFile credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile if len(credentials.ClientCertificate) == 0 { credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 59d58947f62..1d3f7df7464 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -91,7 +91,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" sshutil "k8s.io/kubernetes/pkg/ssh" "k8s.io/kubernetes/pkg/util/system" taintutils "k8s.io/kubernetes/pkg/util/taints" @@ -1052,6 +1052,25 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c 
return fmt.Errorf("PersistentVolumeClaims %v not all in phase %s within %v", pvcNames, phase, timeout) } +// findAvailableNamespaceName returns a random namespace name starting with baseName. +func findAvailableNamespaceName(baseName string, c clientset.Interface) (string, error) { + var name string + err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { + name = fmt.Sprintf("%v-%v", baseName, randomSuffix()) + _, err := c.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) + if err == nil { + // Already taken + return false, nil + } + if apierrs.IsNotFound(err) { + return true, nil + } + Logf("Unexpected error while getting namespace: %v", err) + return false, nil + }) + return name, err +} + // CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name. // Please see NewFramework instead of using this directly. func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) { @@ -1060,11 +1079,19 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s } labels["e2e-run"] = string(RunId) + // We don't use ObjectMeta.GenerateName feature, as in case of API call + // failure we don't know whether the namespace was created and what is its + // name. 
+ name, err := findAvailableNamespaceName(baseName, c) + if err != nil { + return nil, err + } + namespaceObj := &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName), - Namespace: "", - Labels: labels, + Name: name, + Namespace: "", + Labels: labels, }, Status: v1.NamespaceStatus{}, } @@ -1496,10 +1523,12 @@ func eventOccurred(c clientset.Interface, podName, namespace, eventSelector, msg if err != nil { return false, fmt.Errorf("got error while getting pod events: %s", err) } - if len(events.Items) == 0 { - return false, nil // no events have occurred yet + for _, event := range events.Items { + if strings.Contains(event.Message, msg) { + return true, nil + } } - return strings.Contains(events.Items[0].Message, msg), nil + return false, nil } } @@ -2489,7 +2518,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { // 1. it takes tens of minutes or hours to grab all of them // 2. there are so many of them that working with them are mostly impossible // So we dump them only if the cluster is relatively small. - maxNodesForDump := 20 + maxNodesForDump := TestContext.MaxNodesToGather if nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}); err == nil { if len(nodes.Items) <= maxNodesForDump { dumpAllPodInfo(c) @@ -2653,7 +2682,7 @@ func isNodeUntainted(node *v1.Node) bool { }, }, } - nodeInfo := schedulercache.NewNodeInfo() + nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo.SetNode(node) fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo) if err != nil { @@ -2732,7 +2761,7 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er // However, we only allow non-ready nodes with some specific reasons. if len(notSchedulable) > 0 { // In large clusters, log them only every 10th pass. 
- if len(nodes.Items) >= largeClusterThreshold && attempt%10 == 0 { + if len(nodes.Items) < largeClusterThreshold || attempt%10 == 0 { Logf("Unschedulable nodes:") for i := range notSchedulable { Logf("-> %s Ready=%t Network=%t Taints=%v", @@ -3599,44 +3628,43 @@ func DeletePodOrFail(c clientset.Interface, ns, name string) { // GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be // used to SSH to their nodes. func GetSigner(provider string) (ssh.Signer, error) { - // Get the directory in which SSH keys are located. - keydir := filepath.Join(os.Getenv("HOME"), ".ssh") - // Select the key itself to use. When implementing more providers here, // please also add them to any SSH tests that are disabled because of signer // support. keyfile := "" - key := "" switch provider { case "gce", "gke", "kubemark": - keyfile = "google_compute_engine" - case "aws": - // If there is an env. variable override, use that. - aws_keyfile := os.Getenv("AWS_SSH_KEY") - if len(aws_keyfile) != 0 { - return sshutil.MakePrivateKeySignerFromFile(aws_keyfile) + keyfile = os.Getenv("GCE_SSH_KEY") + if keyfile == "" { + keyfile = "google_compute_engine" + } + case "aws": + keyfile = os.Getenv("AWS_SSH_KEY") + if keyfile == "" { + keyfile = "kube_aws_rsa" } - // Otherwise revert to home dir - keyfile = "kube_aws_rsa" case "local", "vsphere": - keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe? - if len(keyfile) == 0 { + keyfile = os.Getenv("LOCAL_SSH_KEY") + if keyfile == "" { keyfile = "id_rsa" } case "skeleton": keyfile = os.Getenv("KUBE_SSH_KEY") - if len(keyfile) == 0 { + if keyfile == "" { keyfile = "id_rsa" } default: return nil, fmt.Errorf("GetSigner(...) 
not implemented for %s", provider) } - if len(key) == 0 { - key = filepath.Join(keydir, keyfile) + // Respect absolute paths for keys given by user, fallback to assuming + // relative paths are in ~/.ssh + if !filepath.IsAbs(keyfile) { + keydir := filepath.Join(os.Getenv("HOME"), ".ssh") + keyfile = filepath.Join(keydir, keyfile) } - return sshutil.MakePrivateKeySignerFromFile(key) + return sshutil.MakePrivateKeySignerFromFile(keyfile) } // CheckPodsRunningReady returns whether all pods whose names are listed in @@ -5270,3 +5298,24 @@ func WaitForNodeHasTaintOrNot(c clientset.Interface, nodeName string, taint *v1. } return nil } + +// GetFileModeRegex returns a file mode related regex which should be matched by the mounttest pods' output. +// If the given mask is nil, then the regex will contain the default OS file modes, which are 0644 for Linux and 0775 for Windows. +func GetFileModeRegex(filePath string, mask *int32) string { + var ( + linuxMask int32 + windowsMask int32 + ) + if mask == nil { + linuxMask = int32(0644) + windowsMask = int32(0775) + } else { + linuxMask = *mask + windowsMask = *mask + } + + linuxOutput := fmt.Sprintf("mode of file \"%s\": %v", filePath, os.FileMode(linuxMask)) + windowsOutput := fmt.Sprintf("mode of Windows file \"%v\": %s", filePath, os.FileMode(windowsMask)) + + return fmt.Sprintf("(%s|%s)", linuxOutput, windowsOutput) +} diff --git a/test/e2e/instrumentation/logging/OWNERS b/test/e2e/instrumentation/logging/OWNERS index 1a6a1d51b32..c76a20b5677 100644 --- a/test/e2e/instrumentation/logging/OWNERS +++ b/test/e2e/instrumentation/logging/OWNERS @@ -1,6 +1,8 @@ reviewers: - coffeepac + - monotek - piosz approvers: - coffeepac + - monotek - piosz diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index ca1dc35283d..c93c431a3cd 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -22,9 +22,9 @@ import ( "strings" "time" + "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" @@ -494,7 +494,7 @@ func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookup // createTargetedProbeCommand returns a command line that performs a DNS lookup for a specific record type func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePrefix string) (string, string) { fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, nameToResolve) - probeCmd := fmt.Sprintf("dig +short +tries=12 +norecurse %s %s > /results/%s", nameToResolve, lookup, fileName) + probeCmd := fmt.Sprintf("dig +short +tries=12 %s %s > /results/%s", nameToResolve, lookup, fileName) return probeCmd, fileName } diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index ee3839f16d2..32e4c1ace1c 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -131,7 +131,7 @@ var _ = SIGDescribe("Firewall rule", func() { // Send requests from outside of the cluster because internal traffic is whitelisted By("Accessing the external service ip from outside, all non-master nodes should be reached") - Expect(framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) + Expect(framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) // Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster // by removing the tag on one vm and make sure it doesn't get any traffic. 
This is an imperfect @@ -151,11 +151,11 @@ var _ = SIGDescribe("Firewall rule", func() { nodesSet.Insert(nodesNames[0]) gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags) // Make sure traffic is recovered before exit - Expect(framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) + Expect(framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) }() By("Accessing serivce through the external ip and examine got no response from the node without tags") - Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred()) + Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, gce.FirewallTestHttpPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred()) }) It("should have correct firewall rules for e2e cluster", func() { diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index c7377ecd597..6a9a98b5485 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -1083,7 +1083,8 @@ func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *ingress.Ingress timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout} resp := "" err := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) { - resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", address), "") + var err error + resp, err = framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", address), "") if err != nil { framework.Logf("SimpleGET failed: %v", err) return false, nil diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index a1b85257b8a..818d3a5cf54 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -27,7 +27,7 @@ import ( 
compute "google.golang.org/api/compute/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" @@ -2111,6 +2111,8 @@ func execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface svc = jig.WaitForLoadBalancerOrFail(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault) jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) defer func() { + podNodePairs, err := framework.PodNodePairs(cs, ns) + framework.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err) framework.StopServeHostnameService(cs, ns, serviceName) lb := cloudprovider.DefaultLoadBalancerName(svc) framework.Logf("cleaning load balancer resource for %s", lb) diff --git a/test/e2e/scalability/BUILD b/test/e2e/scalability/BUILD index a87ebd8c347..0d7e0e47f69 100644 --- a/test/e2e/scalability/BUILD +++ b/test/e2e/scalability/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "common.go", "density.go", "framework.go", "load.go", diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/generated_expansion.go b/test/e2e/scalability/common.go similarity index 72% rename from staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/generated_expansion.go rename to test/e2e/scalability/common.go index c3270941d4d..4fac86cb03d 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/generated_expansion.go +++ b/test/e2e/scalability/common.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. +package scalability -package internalversion +import "time" -type FischerExpansion interface{} - -type FlunderExpansion interface{} +const ( + // UnreadyNodeToleration denotes time the node can be unreachable/not ready. + UnreadyNodeToleration = 15 * time.Minute +) diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index 3d4d9a6e21e..9a2e64256c3 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -637,7 +637,11 @@ var _ = SIGDescribe("Density", func() { // Since all RCs are created at the same time, timeout for each config // has to assume that it will be run at the very end. podThroughput := 20 - timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute + timeout := time.Duration(totalPods/podThroughput) * time.Second + if timeout < UnreadyNodeToleration { + timeout = UnreadyNodeToleration + } + timeout += 3 * time.Minute // createClients is defined in load.go clients, internalClients, scalesClients, err := createClients(numberOfCollections) framework.ExpectNoError(err) @@ -688,6 +692,19 @@ var _ = SIGDescribe("Density", func() { SecretNames: secretNames, ConfigMapNames: configMapNames, ServiceAccountTokenProjections: itArg.svcacctTokenProjectionsPerPod, + Tolerations: []v1.Toleration{ + { + Key: "node.kubernetes.io/not-ready", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoExecute, + TolerationSeconds: func(i int64) *int64 { return &i }(int64(UnreadyNodeToleration / time.Second)), + }, { + Key: "node.kubernetes.io/unreachable", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoExecute, + TolerationSeconds: func(i int64) *int64 { return &i }(int64(UnreadyNodeToleration / time.Second)), + }, + }, } switch itArg.kind { case api.Kind("ReplicationController"): diff --git 
a/test/e2e/scalability/load.go b/test/e2e/scalability/load.go index 78d3d45c55c..5e2a95e2d9c 100644 --- a/test/e2e/scalability/load.go +++ b/test/e2e/scalability/load.go @@ -541,7 +541,7 @@ func GenerateConfigsForGroup( InternalClient: nil, // this will be overwritten later Name: groupName + "-" + strconv.Itoa(i), Namespace: namespace, - Timeout: 10 * time.Minute, + Timeout: UnreadyNodeToleration, Image: image, Command: command, Replicas: size, @@ -551,6 +551,19 @@ func GenerateConfigsForGroup( ConfigMapNames: configMapNames, // Define a label to group every 2 RCs into one service. Labels: map[string]string{svcLabelKey: groupName + "-" + strconv.Itoa((i+1)/2)}, + Tolerations: []v1.Toleration{ + { + Key: "node.kubernetes.io/not-ready", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoExecute, + TolerationSeconds: func(i int64) *int64 { return &i }(int64(UnreadyNodeToleration / time.Second)), + }, { + Key: "node.kubernetes.io/unreachable", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoExecute, + TolerationSeconds: func(i int64) *int64 { return &i }(int64(UnreadyNodeToleration / time.Second)), + }, + }, } if kind == randomKind { diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 9da465bf82c..33eaa147b54 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -27,6 +27,7 @@ go_library( "//pkg/quota/v1/evaluator/core:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", + "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library", diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index f4d57dde116..c1f7395bdf3 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -18,13 +18,19 @@ 
package scheduling import ( "fmt" + "strings" "time" + "k8s.io/client-go/tools/cache" + + appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" schedulerapi "k8s.io/api/scheduling/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/test/e2e/framework" @@ -34,6 +40,11 @@ import ( _ "github.com/stretchr/testify/assert" ) +type priorityPair struct { + name string + value int32 +} + var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { var cs clientset.Interface var nodeList *v1.NodeList @@ -44,24 +55,31 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { lowPriorityClassName := f.BaseName + "-low-priority" mediumPriorityClassName := f.BaseName + "-medium-priority" highPriorityClassName := f.BaseName + "-high-priority" + priorityPairs := []priorityPair{ + {name: lowPriorityClassName, value: lowPriority}, + {name: mediumPriorityClassName, value: mediumPriority}, + {name: highPriorityClassName, value: highPriority}, + } + AfterEach(func() { + for _, pair := range priorityPairs { + cs.SchedulingV1beta1().PriorityClasses().Delete(pair.name, metav1.NewDeleteOptions(0)) + } }) BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name nodeList = &v1.NodeList{} - _, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) - Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true)) - _, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: mediumPriorityClassName}, Value: mediumPriority}) - Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true)) - _, err = 
f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: lowPriorityClassName}, Value: lowPriority}) - Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true)) + for _, pair := range priorityPairs { + _, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}) + Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true)) + } framework.WaitForAllNodesHealthy(cs, time.Minute) masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs) - err = framework.CheckTestingNSDeletedExcept(cs, ns) + err := framework.CheckTestingNSDeletedExcept(cs, ns) framework.ExpectNoError(err) }) @@ -353,3 +371,233 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() { } }) }) + +// construct a fakecpu so as to set it to status of Node object +// otherwise if we update CPU/Memory/etc, those values will be corrected back by kubelet +var fakecpu v1.ResourceName = "example.com/fakecpu" + +var _ = SIGDescribe("PreemptionExecutionPath", func() { + var cs clientset.Interface + var node *v1.Node + var ns string + f := framework.NewDefaultFramework("sched-preemption-path") + + priorityPairs := make([]priorityPair, 0) + + AfterEach(func() { + if node != nil { + nodeCopy := node.DeepCopy() + // force it to update + nodeCopy.ResourceVersion = "0" + delete(nodeCopy.Status.Capacity, fakecpu) + _, err := cs.CoreV1().Nodes().UpdateStatus(nodeCopy) + framework.ExpectNoError(err) + } + for _, pair := range priorityPairs { + cs.SchedulingV1beta1().PriorityClasses().Delete(pair.name, metav1.NewDeleteOptions(0)) + } + }) + + BeforeEach(func() { + cs = f.ClientSet + ns = f.Namespace.Name + + // find an available node + By("Finding an available node") + nodeName := GetNodeThatCanRunPod(f) + framework.Logf("found a healthy node: %s", nodeName) + + // get the node API object + var err error + node, err = 
cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + framework.Failf("error getting node %q: %v", nodeName, err) + } + + // update Node API object with a fake resource + nodeCopy := node.DeepCopy() + // force it to update + nodeCopy.ResourceVersion = "0" + nodeCopy.Status.Capacity[fakecpu] = resource.MustParse("800") + node, err = cs.CoreV1().Nodes().UpdateStatus(nodeCopy) + framework.ExpectNoError(err) + + // create four PriorityClass: p1, p2, p3, p4 + for i := 1; i <= 4; i++ { + priorityName := fmt.Sprintf("p%d", i) + priorityVal := int32(i) + priorityPairs = append(priorityPairs, priorityPair{name: priorityName, value: priorityVal}) + _, err := cs.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}) + Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true)) + } + }) + + It("runs ReplicaSets to verify preemption running path", func() { + podNamesSeen := make(map[string]struct{}) + stopCh := make(chan struct{}) + + // create a pod controller to list/watch pod events from the test framework namespace + _, podController := cache.NewInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + obj, err := f.ClientSet.CoreV1().Pods(ns).List(options) + return runtime.Object(obj), err + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return f.ClientSet.CoreV1().Pods(ns).Watch(options) + }, + }, + &v1.Pod{}, + 0, + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if pod, ok := obj.(*v1.Pod); ok { + podNamesSeen[pod.Name] = struct{}{} + } + }, + }, + ) + go podController.Run(stopCh) + defer close(stopCh) + + // prepare four ReplicaSet + rsConfs := []pauseRSConfig{ + { + Replicas: int32(5), + PodConfig: pausePodConfig{ + Name: "pod1", + Namespace: ns, + Labels: map[string]string{"name": "pod1"}, + PriorityClassName: "p1", + NodeSelector: 
map[string]string{"kubernetes.io/hostname": node.Name}, + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{fakecpu: resource.MustParse("40")}, + Limits: v1.ResourceList{fakecpu: resource.MustParse("40")}, + }, + }, + }, + { + Replicas: int32(4), + PodConfig: pausePodConfig{ + Name: "pod2", + Namespace: ns, + Labels: map[string]string{"name": "pod2"}, + PriorityClassName: "p2", + NodeSelector: map[string]string{"kubernetes.io/hostname": node.Name}, + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{fakecpu: resource.MustParse("50")}, + Limits: v1.ResourceList{fakecpu: resource.MustParse("50")}, + }, + }, + }, + { + Replicas: int32(4), + PodConfig: pausePodConfig{ + Name: "pod3", + Namespace: ns, + Labels: map[string]string{"name": "pod3"}, + PriorityClassName: "p3", + NodeSelector: map[string]string{"kubernetes.io/hostname": node.Name}, + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{fakecpu: resource.MustParse("95")}, + Limits: v1.ResourceList{fakecpu: resource.MustParse("95")}, + }, + }, + }, + { + Replicas: int32(1), + PodConfig: pausePodConfig{ + Name: "pod4", + Namespace: ns, + Labels: map[string]string{"name": "pod4"}, + PriorityClassName: "p4", + NodeSelector: map[string]string{"kubernetes.io/hostname": node.Name}, + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{fakecpu: resource.MustParse("400")}, + Limits: v1.ResourceList{fakecpu: resource.MustParse("400")}, + }, + }, + }, + } + // create ReplicaSet{1,2,3} so as to occupy 780/800 fake resource + rsNum := len(rsConfs) + for i := 0; i < rsNum-1; i++ { + runPauseRS(f, rsConfs[i]) + } + + framework.Logf("pods created so far: %v", podNamesSeen) + framework.Logf("length of pods created so far: %v", len(podNamesSeen)) + + // create ReplicaSet4 + // if runPauseRS failed, it means ReplicaSet4 cannot be scheduled even after 1 minute + // which is unacceptable + runPauseRS(f, rsConfs[rsNum-1]) + + framework.Logf("pods created so far: %v", 
podNamesSeen) + framework.Logf("length of pods created so far: %v", len(podNamesSeen)) + + // count pods number of ReplicaSet{1,2,3}, if it's more than expected replicas + // then it denotes its pods have been over-preempted + // "*2" means pods of ReplicaSet{1,2} are expected to be only preempted once + maxRSPodsSeen := []int{5 * 2, 4 * 2, 4} + rsPodsSeen := []int{0, 0, 0} + for podName := range podNamesSeen { + if strings.HasPrefix(podName, "rs-pod1") { + rsPodsSeen[0]++ + } else if strings.HasPrefix(podName, "rs-pod2") { + rsPodsSeen[1]++ + } else if strings.HasPrefix(podName, "rs-pod3") { + rsPodsSeen[2]++ + } + } + for i, got := range rsPodsSeen { + expected := maxRSPodsSeen[i] + if got > expected { + framework.Failf("pods of ReplicaSet%d have been over-preempted: expect %v pod names, but got %d", i+1, expected, got) + } + } + }) + +}) + +type pauseRSConfig struct { + Replicas int32 + PodConfig pausePodConfig +} + +func initPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet { + pausePod := initPausePod(f, conf.PodConfig) + pauseRS := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rs-" + pausePod.Name, + Namespace: pausePod.Namespace, + }, + Spec: appsv1.ReplicaSetSpec{ + Replicas: &conf.Replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: pausePod.Labels, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: pausePod.ObjectMeta.Labels}, + Spec: pausePod.Spec, + }, + }, + } + return pauseRS +} + +func createPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet { + namespace := conf.PodConfig.Namespace + if len(namespace) == 0 { + namespace = f.Namespace.Name + } + rs, err := f.ClientSet.AppsV1().ReplicaSets(namespace).Create(initPauseRS(f, conf)) + framework.ExpectNoError(err) + return rs +} + +func runPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet { + rs := createPauseRS(f, conf) + 
framework.ExpectNoError(framework.WaitForReplicaSetTargetAvailableReplicas(f.ClientSet, rs, conf.Replicas)) + return rs +} diff --git a/test/e2e/scheduling/taints.go b/test/e2e/scheduling/taints.go index c74a2dfb0db..579b8ed3bc8 100644 --- a/test/e2e/scheduling/taints.go +++ b/test/e2e/scheduling/taints.go @@ -45,7 +45,7 @@ func getTestTaint() v1.Taint { } } -// Creates a defaut pod for this test, with argument saying if the Pod should have +// Create a default pod for this test, with argument saying if the Pod should have // toleration for Taits used in this test. func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName, podLabel, ns string) *v1.Pod { grace := int64(1) diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 2d31246c771..118c3a259ba 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "csi_volumes.go", + "detach_mounted.go", "empty_dir_wrapper.go", "ephemeral_volume.go", "flexvolume.go", diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index dcedb65810c..dd9a25806a7 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -47,6 +47,7 @@ var csiTestDrivers = []func() drivers.TestDriver{ drivers.InitHostPathCSIDriver, drivers.InitGcePDCSIDriver, drivers.InitGcePDExternalCSIDriver, + drivers.InitHostV0PathCSIDriver, } // List of testSuites to be executed in below loop diff --git a/test/e2e/storage/detach_mounted.go b/test/e2e/storage/detach_mounted.go new file mode 100644 index 00000000000..4ee571e8cfb --- /dev/null +++ b/test/e2e/storage/detach_mounted.go @@ -0,0 +1,227 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + "math/rand" + "path" + + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" + imageutils "k8s.io/kubernetes/test/utils/image" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var ( + BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox) + durationForStuckMount = 110 * time.Second +) + +var _ = utils.SIGDescribe("Detaching volumes", func() { + f := framework.NewDefaultFramework("flexvolume") + + // note that namespace deletion is handled by delete-namespace flag + + var cs clientset.Interface + var ns *v1.Namespace + var node v1.Node + var suffix string + + BeforeEach(func() { + framework.SkipUnlessProviderIs("gce", "local") + framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom") + framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom") + framework.SkipUnlessSSHKeyPresent() + + cs = f.ClientSet + ns = f.Namespace + nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + node = nodes.Items[rand.Intn(len(nodes.Items))] + suffix = ns.Name + }) + + It("should not work when mount is in progress", func() { + driver := "attachable-with-long-mount" + driverInstallAs := driver + "-" + suffix + + By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs)) + installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, 
driver)) + By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs)) + installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver)) + volumeSource := v1.VolumeSource{ + FlexVolume: &v1.FlexVolumeSource{ + Driver: "k8s/" + driverInstallAs, + }, + } + + clientPod := getFlexVolumePod(volumeSource, node.Name) + By("Creating pod that uses slow format volume") + pod, err := cs.CoreV1().Pods(ns.Name).Create(clientPod) + Expect(err).NotTo(HaveOccurred()) + + uniqueVolumeName := getUniqueVolumeName(pod, driverInstallAs) + + By("waiting for volumes to be attached to node") + err = waitForVolumesAttached(cs, node.Name, uniqueVolumeName) + Expect(err).NotTo(HaveOccurred(), "while waiting for volume to attach to %s node", node.Name) + + By("waiting for volume-in-use on the node after pod creation") + err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName) + Expect(err).NotTo(HaveOccurred(), "while waiting for volume in use") + + By("waiting for kubelet to start mounting the volume") + time.Sleep(20 * time.Second) + + By("Deleting the flexvolume pod") + err = framework.DeletePodWithWait(f, cs, pod) + Expect(err).NotTo(HaveOccurred(), "in deleting the pod") + + // Wait a bit for node to sync the volume status + time.Sleep(30 * time.Second) + + By("waiting for volume-in-use on the node after pod deletion") + err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName) + Expect(err).NotTo(HaveOccurred(), "while waiting for volume in use") + + // Wait for 110s because mount device operation has a sleep of 120 seconds + // we previously already waited for 30s. 
+ time.Sleep(durationForStuckMount) + + By("waiting for volume to disappear from node in-use") + err = waitForVolumesNotInUse(cs, node.Name, uniqueVolumeName) + Expect(err).NotTo(HaveOccurred(), "while waiting for volume to be removed from in-use") + + By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) + uninstallFlex(cs, &node, "k8s", driverInstallAs) + By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs)) + uninstallFlex(cs, nil, "k8s", driverInstallAs) + }) +}) + +func getUniqueVolumeName(pod *v1.Pod, driverName string) string { + return fmt.Sprintf("flexvolume-k8s/%s/%s", driverName, pod.Spec.Volumes[0].Name) +} + +func waitForVolumesNotInUse(client clientset.Interface, nodeName, volumeName string) error { + return wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) { + node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("error fetching node %s with %v", nodeName, err) + } + volumeInUSe := node.Status.VolumesInUse + for _, volume := range volumeInUSe { + if string(volume) == volumeName { + return false, nil + } + } + return true, nil + }) +} + +func waitForVolumesAttached(client clientset.Interface, nodeName, volumeName string) error { + return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) { + node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("error fetching node %s with %v", nodeName, err) + } + volumeAttached := node.Status.VolumesAttached + for _, volume := range volumeAttached { + if string(volume.Name) == volumeName { + return true, nil + } + } + return false, nil + }) +} + +func waitForVolumesInUse(client clientset.Interface, nodeName, volumeName string) error { + return wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) { + node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err 
!= nil { + return false, fmt.Errorf("error fetching node %s with %v", nodeName, err) + } + volumeInUSe := node.Status.VolumesInUse + for _, volume := range volumeInUSe { + if string(volume) == volumeName { + return true, nil + } + } + return false, nil + }) +} + +func getFlexVolumePod(volumeSource v1.VolumeSource, nodeName string) *v1.Pod { + var gracePeriod int64 + clientPod := &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "flexvolume-detach-test" + "-client", + Labels: map[string]string{ + "role": "flexvolume-detach-test" + "-client", + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "flexvolume-detach-test" + "-client", + Image: BusyBoxImage, + WorkingDir: "/opt", + // An imperative and easily debuggable container which reads vol contents for + // us to scan in the tests or by eye. + // We expect that /opt is empty in the minimal containers which we use in this test. + Command: []string{ + "/bin/sh", + "-c", + "while true ; do cat /opt/foo/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ", + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "test-long-detach-flex", + MountPath: "/opt/foo", + }, + }, + }, + }, + TerminationGracePeriodSeconds: &gracePeriod, + SecurityContext: &v1.PodSecurityContext{ + SELinuxOptions: &v1.SELinuxOptions{ + Level: "s0:c0,c1", + }, + }, + Volumes: []v1.Volume{ + { + Name: "test-long-detach-flex", + VolumeSource: volumeSource, + }, + }, + NodeName: nodeName, + }, + } + return clientPod +} diff --git a/test/e2e/storage/drivers/base.go b/test/e2e/storage/drivers/base.go index ae8f3ab8340..95d6b263c33 100644 --- a/test/e2e/storage/drivers/base.go +++ b/test/e2e/storage/drivers/base.go @@ -76,18 +76,26 @@ type DynamicPVTestDriver interface { GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass } +// Capability represents a feature that a volume plugin supports +type Capability string + +const ( + CapPersistence 
Capability = "persistence" // data is persisted across pod restarts + CapBlock Capability = "block" // raw block mode + CapFsGroup Capability = "fsGroup" // volume ownership via fsGroup + CapExec Capability = "exec" // exec a file in the volume +) + // DriverInfo represents a combination of parameters to be used in implementation of TestDriver type DriverInfo struct { Name string // Name of the driver FeatureTag string // FeatureTag for the driver - MaxFileSize int64 // Max file size to be tested for this driver - SupportedFsType sets.String // Map of string for supported fs type - SupportedMountOption sets.String // Map of string for supported mount option - RequiredMountOption sets.String // Map of string for required mount option (Optional) - IsPersistent bool // Flag to represent whether it provides persistency - IsFsGroupSupported bool // Flag to represent whether it supports fsGroup - IsBlockSupported bool // Flag to represent whether it supports Block Volume + MaxFileSize int64 // Max file size to be tested for this driver + SupportedFsType sets.String // Map of string for supported fs type + SupportedMountOption sets.String // Map of string for supported mount option + RequiredMountOption sets.String // Map of string for required mount option (Optional) + Capabilities map[Capability]bool // Map that represents plugin capabilities // Parameters below will be set inside test loop by using SetCommonDriverParameters. // Drivers that implement TestDriver is required to set all the above parameters diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index e6153e62cee..d58faa46f35 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -38,7 +38,6 @@ package drivers import ( "fmt" "math/rand" - "time" . 
"github.com/onsi/ginkgo" storagev1 "k8s.io/api/storage/v1" @@ -67,9 +66,9 @@ func InitHostPathCSIDriver() TestDriver { SupportedFsType: sets.NewString( "", // Default fsType ), - IsPersistent: true, - IsFsGroupSupported: false, - IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapPersistence: true, + }, }, } } @@ -134,6 +133,92 @@ func (h *hostpathCSIDriver) CleanupDriver() { } } +// hostpathV0CSIDriver +type hostpathV0CSIDriver struct { + cleanup func() + driverInfo DriverInfo +} + +var _ TestDriver = &hostpathV0CSIDriver{} +var _ DynamicPVTestDriver = &hostpathV0CSIDriver{} + +// InitHostPathV0CSIDriver returns hostpathV0CSIDriver that implements TestDriver interface +func InitHostV0PathCSIDriver() TestDriver { + return &hostpathV0CSIDriver{ + driverInfo: DriverInfo{ + Name: "csi-hostpath-v0", + FeatureTag: "", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + ), + Capabilities: map[Capability]bool{ + CapPersistence: true, + }, + }, + } +} + +func (h *hostpathV0CSIDriver) GetDriverInfo() *DriverInfo { + return &h.driverInfo +} + +func (h *hostpathV0CSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +} + +func (h *hostpathV0CSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { + provisioner := GetUniqueDriverName(h) + parameters := map[string]string{} + ns := h.driverInfo.Framework.Namespace.Name + suffix := fmt.Sprintf("%s-sc", provisioner) + + return getStorageClass(provisioner, parameters, nil, ns, suffix) +} + +func (h *hostpathV0CSIDriver) CreateDriver() { + By("deploying csi hostpath v0 driver") + f := h.driverInfo.Framework + cs := f.ClientSet + + // pods should be scheduled on the node + nodes := framework.GetReadySchedulableNodesOrDie(cs) + node := nodes.Items[rand.Intn(len(nodes.Items))] + h.driverInfo.Config.ClientNodeName = node.Name + h.driverInfo.Config.ServerNodeName = node.Name + + // TODO (?): the storage.csi.image.version and 
storage.csi.image.registry + // settings are ignored for this test. We could patch the image definitions. + o := utils.PatchCSIOptions{ + OldDriverName: h.driverInfo.Name, + NewDriverName: GetUniqueDriverName(h), + DriverContainerName: "hostpath", + ProvisionerContainerName: "csi-provisioner-v0", + NodeName: h.driverInfo.Config.ServerNodeName, + } + cleanup, err := h.driverInfo.Framework.CreateFromManifests(func(item interface{}) error { + return utils.PatchCSIDeployment(h.driverInfo.Framework, o, item) + }, + "test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml", + "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml", + "test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml", + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-attacher.yaml", + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-provisioner.yaml", + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpathplugin.yaml", + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/e2e-test-rbac.yaml", + ) + h.cleanup = cleanup + if err != nil { + framework.Failf("deploying csi hostpath v0 driver: %v", err) + } +} + +func (h *hostpathV0CSIDriver) CleanupDriver() { + if h.cleanup != nil { + By("uninstalling csi hostpath v0 driver") + h.cleanup() + } +} + // gce-pd type gcePDCSIDriver struct { cleanup func() @@ -157,9 +242,11 @@ func InitGcePDCSIDriver() TestDriver { "ext4", "xfs", ), - IsPersistent: true, - IsFsGroupSupported: true, - IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapFsGroup: true, + CapExec: true, + }, }, } } @@ -170,14 +257,8 @@ func (g *gcePDCSIDriver) GetDriverInfo() *DriverInfo { func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { f := g.driverInfo.Framework - cs := f.ClientSet - config := g.driverInfo.Config framework.SkipUnlessProviderIs("gce", "gke") - framework.SkipIfMultizone(cs) - - // 
TODO(#62561): Use credentials through external pod identity when that goes GA instead of downloading keys. - createGCESecrets(cs, config) - framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute) + framework.SkipIfMultizone(f.ClientSet) } func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { @@ -204,6 +285,8 @@ func (g *gcePDCSIDriver) CreateDriver() { // DriverContainerName: "gce-driver", // ProvisionerContainerName: "csi-external-provisioner", // } + createGCESecrets(g.driverInfo.Framework.ClientSet, g.driverInfo.Config) + cleanup, err := g.driverInfo.Framework.CreateFromManifests(nil, "test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml", "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml", @@ -248,9 +331,11 @@ func InitGcePDExternalCSIDriver() TestDriver { "ext4", "xfs", ), - IsPersistent: true, - IsFsGroupSupported: true, - IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapFsGroup: true, + CapExec: true, + }, }, } } diff --git a/test/e2e/storage/drivers/csi_objects.go b/test/e2e/storage/drivers/csi_objects.go index cd3de2fce40..1a4c2af592f 100644 --- a/test/e2e/storage/drivers/csi_objects.go +++ b/test/e2e/storage/drivers/csi_objects.go @@ -28,6 +28,7 @@ import ( "path/filepath" "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" @@ -115,5 +116,7 @@ func createGCESecrets(client clientset.Interface, config framework.VolumeTestCon } _, err = client.CoreV1().Secrets(config.Namespace).Create(s) - framework.ExpectNoError(err, "Failed to create Secret %v", s.GetName()) + if !apierrors.IsAlreadyExists(err) { + framework.ExpectNoError(err, "Failed to create Secret %v", s.GetName()) + } } diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index c69ae3f42fc..cc73bd81a4c 100644 --- 
a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -90,9 +90,10 @@ func InitNFSDriver() TestDriver { ), SupportedMountOption: sets.NewString("proto=tcp", "relatime"), RequiredMountOption: sets.NewString("vers=4.1"), - IsPersistent: true, - IsFsGroupSupported: false, - IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapExec: true, + }, }, } } @@ -235,9 +236,10 @@ func InitGlusterFSDriver() TestDriver { SupportedFsType: sets.NewString( "", // Default fsType ), - IsPersistent: true, - IsFsGroupSupported: false, - IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapExec: true, + }, }, } } @@ -356,9 +358,12 @@ func InitISCSIDriver() TestDriver { //"ext3", "ext4", ), - IsPersistent: true, - IsFsGroupSupported: true, - IsBlockSupported: true, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapFsGroup: true, + CapBlock: true, + CapExec: true, + }, }, } } @@ -465,9 +470,13 @@ func InitRbdDriver() TestDriver { //"ext3", "ext4", ), - IsPersistent: true, - IsFsGroupSupported: true, - IsBlockSupported: true}, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapFsGroup: true, + CapBlock: true, + CapExec: true, + }, + }, } } @@ -585,9 +594,10 @@ func InitCephFSDriver() TestDriver { SupportedFsType: sets.NewString( "", // Default fsType ), - IsPersistent: true, - IsFsGroupSupported: false, - IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapExec: true, + }, }, } } @@ -684,9 +694,9 @@ func InitHostPathDriver() TestDriver { SupportedFsType: sets.NewString( "", // Default fsType ), - IsPersistent: true, - IsFsGroupSupported: false, - IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapPersistence: true, + }, }, } } @@ -756,9 +766,9 @@ func InitHostPathSymlinkDriver() TestDriver { SupportedFsType: sets.NewString( "", // Default fsType ), - IsPersistent: true, - IsFsGroupSupported: false, - 
IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapPersistence: true, + }, }, } } @@ -896,9 +906,9 @@ func InitEmptydirDriver() TestDriver { SupportedFsType: sets.NewString( "", // Default fsType ), - IsPersistent: false, - IsFsGroupSupported: false, - IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapExec: true, + }, }, } } @@ -963,9 +973,11 @@ func InitCinderDriver() TestDriver { "", // Default fsType "ext3", ), - IsPersistent: true, - IsFsGroupSupported: true, - IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapFsGroup: true, + CapExec: true, + }, }, } } @@ -1121,9 +1133,12 @@ func InitGcePdDriver() TestDriver { "xfs", ), SupportedMountOption: sets.NewString("debug", "nouid32"), - IsPersistent: true, - IsFsGroupSupported: true, - IsBlockSupported: true, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapFsGroup: true, + CapBlock: true, + CapExec: true, + }, }, } } @@ -1235,9 +1250,11 @@ func InitVSphereDriver() TestDriver { "", // Default fsType "ext4", ), - IsPersistent: true, - IsFsGroupSupported: true, - IsBlockSupported: false, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapFsGroup: true, + CapExec: true, + }, }, } } @@ -1351,9 +1368,12 @@ func InitAzureDriver() TestDriver { "", // Default fsType "ext4", ), - IsPersistent: true, - IsFsGroupSupported: true, - IsBlockSupported: true, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapFsGroup: true, + CapBlock: true, + CapExec: true, + }, }, } } @@ -1464,9 +1484,12 @@ func InitAwsDriver() TestDriver { "ext3", ), SupportedMountOption: sets.NewString("debug", "nouid32"), - IsPersistent: true, - IsFsGroupSupported: true, - IsBlockSupported: true, + Capabilities: map[Capability]bool{ + CapPersistence: true, + CapFsGroup: true, + CapBlock: true, + CapExec: true, + }, }, } } diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go index 8898706f678..6359a8a5e12 100644 --- 
a/test/e2e/storage/flexvolume.go +++ b/test/e2e/storage/flexvolume.go @@ -44,10 +44,8 @@ const ( // On gci, root is read-only and controller-manager containerized. Assume // controller-manager has started with --flex-volume-plugin-dir equal to this // (see cluster/gce/config-test.sh) - gciVolumePluginDir = "/home/kubernetes/flexvolume" - gciVolumePluginDirLegacy = "/etc/srv/kubernetes/kubelet-plugins/volume/exec" - gciVolumePluginDirVersion = "1.10.0" - detachTimeout = 10 * time.Second + gciVolumePluginDir = "/home/kubernetes/flexvolume" + detachTimeout = 10 * time.Second ) // testFlexVolume tests that a client pod using a given flexvolume driver @@ -130,24 +128,7 @@ func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string) func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) string { volumePluginDir := defaultVolumePluginDir if framework.ProviderIs("gce") { - if node == nil && framework.MasterOSDistroIs("gci", "ubuntu") { - v, err := getMasterVersion(c) - if err != nil { - framework.Failf("Error getting master version: %v", err) - } - - if v.AtLeast(versionutil.MustParseGeneric(gciVolumePluginDirVersion)) { - volumePluginDir = gciVolumePluginDir - } else { - volumePluginDir = gciVolumePluginDirLegacy - } - } else if node != nil && framework.NodeOSDistroIs("gci", "ubuntu") { - if getNodeVersion(node).AtLeast(versionutil.MustParseGeneric(gciVolumePluginDirVersion)) { - volumePluginDir = gciVolumePluginDir - } else { - volumePluginDir = gciVolumePluginDirLegacy - } - } + volumePluginDir = gciVolumePluginDir } flexDir := path.Join(volumePluginDir, fmt.Sprintf("/%s~%s/", vendor, driver)) return flexDir diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index aabdb459f9c..5887336d658 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -334,7 +334,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { disruptOp := t.disruptOp It(fmt.Sprintf("when %s", t.descr), func() { 
framework.SkipUnlessProviderIs("gce") - origNodeCnt := len(nodes.Items) // healhy nodes running kublet + origNodeCnt := len(nodes.Items) // healhy nodes running kubelet By("creating a pd") diskName, err := framework.CreatePDWithRetry() diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index 44865d1d026..d94aa6effe3 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -85,7 +85,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { By("Checking that PV Protection finalizer is set") pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred(), "While getting PV status") - Expect(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)).To(BeTrue()) + Expect(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)).To(BeTrue(), "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers) }) AfterEach(func() { diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index af6ed9c1281..5c2868dc088 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -64,7 +64,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { By("Checking that PVC Protection finalizer is set") pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred(), "While getting PVC status") - Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(BeTrue()) + Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(BeTrue(), "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers) }) AfterEach(func() { diff --git a/test/e2e/storage/testsuites/BUILD b/test/e2e/storage/testsuites/BUILD index 
1987fb2cf50..800a19f6093 100644 --- a/test/e2e/storage/testsuites/BUILD +++ b/test/e2e/storage/testsuites/BUILD @@ -18,7 +18,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 70c3ad0e3cf..ba9f8d025b3 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -187,7 +187,7 @@ func testProvisioning(input *provisioningTestInput) { }) It("should create and delete block persistent volumes", func() { - if !input.dInfo.IsBlockSupported { + if !input.dInfo.Capabilities[drivers.CapBlock] { framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name) } block := v1.PersistentVolumeBlock diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 619b1d545a0..66e1985c9ce 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -24,7 +24,6 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" @@ -587,18 +586,54 @@ func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg strin defer func() { framework.DeletePodWithWait(f, f.ClientSet, pod) }() - err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) - Expect(err).To(HaveOccurred(), "while waiting for pod to be 
running") + By("Checking for subpath error in container status") + err = waitForPodSubpathError(f, pod) + Expect(err).NotTo(HaveOccurred(), "while waiting for subpath failure") +} - By("Checking for subpath error event") - selector := fields.Set{ - "involvedObject.kind": "Pod", - "involvedObject.name": pod.Name, - "involvedObject.namespace": f.Namespace.Name, - "reason": "Failed", - }.AsSelector().String() - err = framework.WaitTimeoutForPodEvent(f.ClientSet, pod.Name, f.Namespace.Name, selector, errorMsg, framework.PodEventTimeout) - Expect(err).NotTo(HaveOccurred(), "while waiting for failed event to occur") +func findSubpathContainerName(pod *v1.Pod) string { + for _, container := range pod.Spec.Containers { + for _, mount := range container.VolumeMounts { + if mount.SubPath != "" { + return container.Name + } + } + } + return "" +} + +func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod) error { + subpathContainerName := findSubpathContainerName(pod) + if subpathContainerName == "" { + return fmt.Errorf("failed to find container that uses subpath") + } + + return wait.PollImmediate(framework.Poll, framework.PodStartTimeout, func() (bool, error) { + pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + for _, status := range pod.Status.ContainerStatuses { + // 0 is the container that uses subpath + if status.Name == subpathContainerName { + switch { + case status.State.Running != nil: + return false, fmt.Errorf("subpath container unexpectedly became running") + case status.State.Terminated != nil: + return false, fmt.Errorf("subpath container unexpectedly terminated") + case status.State.Waiting != nil: + if status.State.Waiting.Reason == "CreateContainerConfigError" && + strings.Contains(status.State.Waiting.Message, "subPath") { + return true, nil + } + return false, nil + default: + return false, nil + } + } + } + return false, nil + }) } // Tests that the existing 
subpath mount is detected when a container restarts diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index 82cb0eaf4b0..3b291517778 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -24,7 +24,7 @@ package testsuites import ( "fmt" "math" - "path" + "path/filepath" "strconv" "strings" "time" @@ -49,6 +49,8 @@ var md5hashes = map[int64]string{ testpatterns.FileSizeLarge: "8d763edc71bd16217664793b5a15e403", } +const mountPath = "/opt" + type volumeIOTestSuite struct { tsInfo TestSuiteInfo } @@ -88,7 +90,7 @@ func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericV framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) } - if dInfo.IsFsGroupSupported { + if dInfo.Capabilities[drivers.CapFsGroup] { fsGroupVal := int64(1234) fsGroup = &fsGroupVal } @@ -176,10 +178,9 @@ func createFileSizes(maxFileSize int64) []int64 { } // Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env. 
-func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod { - volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume") - +func makePodSpec(config framework.VolumeTestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod { var gracePeriod int64 = 1 + volName := fmt.Sprintf("io-volume-%s", config.Namespace) return &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -204,7 +205,7 @@ func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc VolumeMounts: []v1.VolumeMount{ { Name: volName, - MountPath: dir, + MountPath: mountPath, }, }, }, @@ -221,7 +222,7 @@ func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc VolumeMounts: []v1.VolumeMount{ { Name: volName, - MountPath: dir, + MountPath: mountPath, }, }, }, @@ -302,8 +303,7 @@ func deleteFile(pod *v1.Pod, fpath string) { // Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize` // bytes. func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { - dir := path.Join("/opt", config.Prefix, config.Namespace) - ddInput := path.Join(dir, "dd_if") + ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace)) writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value loopCnt := testpatterns.MinFileSize / int64(len(writeBlk)) // initContainer cmd to create and fill dd's input file. The initContainer is used to create @@ -311,7 +311,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framewo // used to create a 1MiB file in the target directory. 
initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, ddInput) - clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext) + clientPod := makePodSpec(config, initCmd, volsrc, podSecContext) By(fmt.Sprintf("starting %s", clientPod.Name)) podsNamespacer := cs.CoreV1().Pods(config.Namespace) @@ -320,7 +320,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framewo return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err) } defer func() { - // note the test dir will be removed when the kubelet unmounts it + deleteFile(clientPod, ddInput) By(fmt.Sprintf("deleting client pod %q...", clientPod.Name)) e := framework.DeletePodWithWait(f, cs, clientPod) if e != nil { @@ -345,14 +345,16 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framewo if math.Mod(float64(fsize), float64(testpatterns.MinFileSize)) != 0 { fsize = fsize/testpatterns.MinFileSize + testpatterns.MinFileSize } - fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize)) + fpath := filepath.Join(mountPath, fmt.Sprintf("%s-%d", file, fsize)) + defer func() { + deleteFile(clientPod, fpath) + }() if err = writeToFile(clientPod, fpath, ddInput, fsize); err != nil { return err } if err = verifyFile(clientPod, fpath, fsize, ddInput); err != nil { return err } - deleteFile(clientPod, fpath) } return diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 03468f93329..94bef7508d9 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -78,13 +78,13 @@ func createVolumeModeTestInput(pattern testpatterns.TestPattern, resource volume testVolType: pattern.VolType, nodeName: dInfo.Config.ClientNodeName, volMode: pattern.VolMode, - isBlockSupported: dInfo.IsBlockSupported, + isBlockSupported: dInfo.Capabilities[drivers.CapBlock], } } func getVolumeModeTestFunc(pattern 
testpatterns.TestPattern, driver drivers.TestDriver) func(*volumeModeTestInput) { dInfo := driver.GetDriverInfo() - isBlockSupported := dInfo.IsBlockSupported + isBlockSupported := dInfo.Capabilities[drivers.CapBlock] volMode := pattern.VolMode volType := pattern.VolType diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index ce8e2093e86..e8bd845bcfd 100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -23,11 +23,17 @@ package testsuites import ( "fmt" + "path/filepath" . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/drivers" "k8s.io/kubernetes/test/e2e/storage/testpatterns" + imageutils "k8s.io/kubernetes/test/utils/image" ) type volumesTestSuite struct { @@ -68,12 +74,22 @@ func (t *volumesTestSuite) getTestSuiteInfo() TestSuiteInfo { } func (t *volumesTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) { +} + +func skipPersistenceTest(driver drivers.TestDriver) { dInfo := driver.GetDriverInfo() - if !dInfo.IsPersistent { + if !dInfo.Capabilities[drivers.CapPersistence] { framework.Skipf("Driver %q does not provide persistency - skipping", dInfo.Name) } } +func skipExecTest(driver drivers.TestDriver) { + dInfo := driver.GetDriverInfo() + if !dInfo.Capabilities[drivers.CapExec] { + framework.Skipf("Driver %q does not support exec - skipping", dInfo.Name) + } +} + func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput { var fsGroup *int64 driver := resource.driver @@ -85,16 +101,17 @@ func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVo framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) } - if dInfo.IsFsGroupSupported { + if dInfo.Capabilities[drivers.CapFsGroup] { 
fsGroupVal := int64(1234) fsGroup = &fsGroupVal } return volumesTestInput{ - f: f, - name: dInfo.Name, - config: dInfo.Config, - fsGroup: fsGroup, + f: f, + name: dInfo.Name, + config: dInfo.Config, + fsGroup: fsGroup, + resource: resource, tests: []framework.VolumeTest{ { Volume: *volSource, @@ -140,11 +157,12 @@ func (t *volumesTestSuite) execTest(driver drivers.TestDriver, pattern testpatte } type volumesTestInput struct { - f *framework.Framework - name string - config framework.VolumeTestConfig - fsGroup *int64 - tests []framework.VolumeTest + f *framework.Framework + name string + config framework.VolumeTestConfig + fsGroup *int64 + tests []framework.VolumeTest + resource genericVolumeTestResource } func testVolumes(input *volumesTestInput) { @@ -153,8 +171,68 @@ func testVolumes(input *volumesTestInput) { cs := f.ClientSet defer framework.VolumeTestCleanup(f, input.config) + skipPersistenceTest(input.resource.driver) + volumeTest := input.tests framework.InjectHtml(cs, input.config, volumeTest[0].Volume, volumeTest[0].ExpectedContent) framework.TestVolumeClient(cs, input.config, input.fsGroup, input.tests) }) + It("should allow exec of files on the volume", func() { + f := input.f + skipExecTest(input.resource.driver) + + testScriptInPod(f, input.resource.volType, input.resource.volSource, input.config.NodeSelector) + }) +} + +func testScriptInPod( + f *framework.Framework, + volumeType string, + source *v1.VolumeSource, + nodeSelector map[string]string) { + + const ( + volPath = "/vol1" + volName = "vol1" + ) + suffix := generateSuffixForPodName(volumeType) + scriptName := fmt.Sprintf("test-%s.sh", suffix) + fullPath := filepath.Join(volPath, scriptName) + cmd := fmt.Sprintf("echo \"ls %s\" > %s; chmod u+x %s; %s", volPath, fullPath, fullPath, fullPath) + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("exec-volume-test-%s", suffix), + Namespace: f.Namespace.Name, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: 
fmt.Sprintf("exec-container-%s", suffix), + Image: imageutils.GetE2EImage(imageutils.Nginx), + Command: []string{"/bin/sh", "-ec", cmd}, + VolumeMounts: []v1.VolumeMount{ + { + Name: volName, + MountPath: volPath, + }, + }, + }, + }, + Volumes: []v1.Volume{ + { + Name: volName, + VolumeSource: *source, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + NodeSelector: nodeSelector, + }, + } + By(fmt.Sprintf("Creating pod %s", pod.Name)) + f.TestContainerOutput("exec-volume-test", pod, 0, []string{scriptName}) + + By(fmt.Sprintf("Deleting pod %s", pod.Name)) + err := framework.DeletePodWithWait(f, f.ClientSet, pod) + Expect(err).NotTo(HaveOccurred(), "while deleting pod") } diff --git a/test/e2e/storage/vsphere/config.go b/test/e2e/storage/vsphere/config.go index 07a608ae701..98f3feb0c35 100644 --- a/test/e2e/storage/vsphere/config.go +++ b/test/e2e/storage/vsphere/config.go @@ -19,10 +19,11 @@ package vsphere import ( "errors" "fmt" - "gopkg.in/gcfg.v1" "io" - "k8s.io/kubernetes/test/e2e/framework" "os" + + "gopkg.in/gcfg.v1" + "k8s.io/kubernetes/test/e2e/framework" ) const ( @@ -147,7 +148,7 @@ func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) { vcConfig.Password = cfg.Global.Password } if vcConfig.Username == "" { - msg := fmt.Sprintf("vcConfig.User is empty for vc %s!", vcServer) + msg := fmt.Sprintf("vcConfig.Username is empty for vc %s!", vcServer) framework.Logf(msg) return nil, errors.New(msg) } diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index 21eb3d5b1e4..3de1e2bbcf4 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -105,9 +105,8 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() { }) func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd 
*v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) { - volumePath = "" By("creating vmdk") - Expect(err).NotTo(HaveOccurred()) + volumePath = "" volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) if err != nil { return diff --git a/test/e2e/testing-manifests/flexvolume/attachable-with-long-mount b/test/e2e/testing-manifests/flexvolume/attachable-with-long-mount new file mode 100644 index 00000000000..6785147a20a --- /dev/null +++ b/test/e2e/testing-manifests/flexvolume/attachable-with-long-mount @@ -0,0 +1,145 @@ +#!/bin/sh + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This driver is especially designed to test a long mounting scenario +# which can cause a volume to be detached while mount is in progress. 
+ + +FLEX_DUMMY_LOG=${FLEX_DUMMY_LOG:-"/tmp/flex-dummy.log"} + +VALID_MNTDEVICE=foo + +# attach always returns one valid mount device so a different device +# showing up in a subsequent driver call implies a bug +validateMountDeviceOrDie() { + MNTDEVICE=$1 + CALL=$2 + if [ "$MNTDEVICE" != "$VALID_MNTDEVICE" ]; then + log "{\"status\":\"Failure\",\"message\":\"call "${CALL}" expected device "${VALID_MNTDEVICE}", got device "${MNTDEVICE}"\"}" + exit 0 + fi +} + +log() { + printf "$*" >&1 +} + +debug() { + echo "$(date) $*" >> "${FLEX_DUMMY_LOG}" +} + +attach() { + debug "attach $@" + log "{\"status\":\"Success\",\"device\":\""${VALID_MNTDEVICE}"\"}" + exit 0 +} + +detach() { + debug "detach $@" + # TODO issue 44737 detach is passed PV name, not mount device + log "{\"status\":\"Success\"}" + exit 0 +} + +waitforattach() { + debug "waitforattach $@" + MNTDEVICE=$1 + validateMountDeviceOrDie "$MNTDEVICE" "waitforattach" + log "{\"status\":\"Success\",\"device\":\""${MNTDEVICE}"\"}" + exit 0 +} + +isattached() { + debug "isattached $@" + log "{\"status\":\"Success\",\"attached\":true}" + exit 0 +} + +domountdevice() { + debug "domountdevice $@" + MNTDEVICE=$2 + validateMountDeviceOrDie "$MNTDEVICE" "domountdevice" + MNTPATH=$1 + mkdir -p ${MNTPATH} >/dev/null 2>&1 + mount -t tmpfs none ${MNTPATH} >/dev/null 2>&1 + sleep 120 + echo "Hello from flexvolume!" 
>> "${MNTPATH}/index.html" + log "{\"status\":\"Success\"}" + exit 0 +} + +unmountdevice() { + debug "unmountdevice $@" + MNTPATH=$1 + rm "${MNTPATH}/index.html" >/dev/null 2>&1 + umount ${MNTPATH} >/dev/null 2>&1 + log "{\"status\":\"Success\"}" + exit 0 +} + +expandvolume() { + debug "expandvolume $@" + log "{\"status\":\"Success\"}" + exit 0 +} + +expandfs() { + debug "expandfs $@" + log "{\"status\":\"Success\"}" + exit 0 +} + +op=$1 + +if [ "$op" = "init" ]; then + debug "init $@" + log "{\"status\":\"Success\",\"capabilities\":{\"attach\":true, \"requiresFSResize\":true}}" + exit 0 +fi + +shift + +case "$op" in + attach) + attach $* + ;; + detach) + detach $* + ;; + waitforattach) + waitforattach $* + ;; + isattached) + isattached $* + ;; + mountdevice) + domountdevice $* + ;; + unmountdevice) + unmountdevice $* + ;; + expandvolume) + expandvolume $* + ;; + expandfs) + expandfs $* + ;; + *) + log "{\"status\":\"Not supported\"}" + exit 0 +esac + +exit 1 diff --git a/test/e2e/testing-manifests/flexvolume/dummy-attachable b/test/e2e/testing-manifests/flexvolume/dummy-attachable index 2c47faaf3fe..ed9e73e7a01 100755 --- a/test/e2e/testing-manifests/flexvolume/dummy-attachable +++ b/test/e2e/testing-manifests/flexvolume/dummy-attachable @@ -81,8 +81,6 @@ domountdevice() { unmountdevice() { debug "unmountdevice $@" - MNTDEVICE=$2 - validateMountDeviceOrDie "$MNTDEVICE" "unmountdevice" MNTPATH=$1 rm "${MNTPATH}/index.html" >/dev/null 2>&1 umount ${MNTPATH} >/dev/null 2>&1 diff --git a/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml b/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml index 0ed8af14999..6a4de2194df 100644 --- a/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml +++ b/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml @@ -14,7 +14,7 @@ spec: serviceAccountName: csi-node-sa containers: - name: csi-driver-registrar - image: gcr.io/gke-release/csi-driver-registrar:v1.0.0-gke.0 + image: 
gcr.io/gke-release/csi-driver-registrar:v1.0.1-gke.0 args: - "--v=5" - "--csi-address=/csi/csi.sock" diff --git a/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-attacher.yaml b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-attacher.yaml new file mode 100644 index 00000000000..5598a161d07 --- /dev/null +++ b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-attacher.yaml @@ -0,0 +1,48 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpath-attacher + labels: + app: csi-hostpath-attacher +spec: + selector: + app: csi-hostpath-attacher + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpath-attacher +spec: + serviceName: "csi-hostpath-attacher" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-attacher + template: + metadata: + labels: + app: csi-hostpath-attacher + spec: + serviceAccountName: csi-attacher + containers: + - name: csi-attacher + image: gcr.io/gke-release/csi-attacher:v0.4.1-gke.0 + args: + - --v=5 + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: Always + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath-v0 + type: DirectoryOrCreate + name: socket-dir diff --git a/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-provisioner.yaml b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-provisioner.yaml new file mode 100644 index 00000000000..4c2b58a08f6 --- /dev/null +++ b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-provisioner.yaml @@ -0,0 +1,49 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpath-provisioner + labels: + app: csi-hostpath-provisioner +spec: + selector: + app: csi-hostpath-provisioner + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: 
+ name: csi-hostpath-provisioner +spec: + serviceName: "csi-hostpath-provisioner" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-provisioner + template: + metadata: + labels: + app: csi-hostpath-provisioner + spec: + serviceAccountName: csi-provisioner + containers: + - name: csi-provisioner-v0 + image: gcr.io/gke-release/csi-provisioner:v0.4.1-gke.0 + args: + - "--provisioner=csi-hostpath-v0" + - "--csi-address=$(ADDRESS)" + - "--connection-timeout=15s" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: Always + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath-v0 + type: DirectoryOrCreate + name: socket-dir diff --git a/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpathplugin.yaml b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpathplugin.yaml new file mode 100644 index 00000000000..a8f34832d1d --- /dev/null +++ b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpathplugin.yaml @@ -0,0 +1,70 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-hostpathplugin +spec: + selector: + matchLabels: + app: csi-hostpathplugin + template: + metadata: + labels: + app: csi-hostpathplugin + spec: + serviceAccountName: csi-node-sa + hostNetwork: true + containers: + - name: driver-registrar + image: gcr.io/gke-release/csi-driver-registrar:v0.4.1-gke.0 + args: + - --v=5 + - --csi-address=/csi/csi.sock + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath-v0/csi.sock + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + imagePullPolicy: Always + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /registration + name: registration-dir + - name: hostpath + image: quay.io/k8scsi/hostpathplugin:v0.4.1 + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + env: + - name: CSI_ENDPOINT + value: 
unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + imagePullPolicy: Always + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath-v0 + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/pods + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins + type: Directory + name: registration-dir diff --git a/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/e2e-test-rbac.yaml b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/e2e-test-rbac.yaml new file mode 100644 index 00000000000..aa008ecac29 --- /dev/null +++ b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/e2e-test-rbac.yaml @@ -0,0 +1,19 @@ +# priviledged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding() +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp-csi-hostpath-role +subjects: + - kind: ServiceAccount + name: csi-attacher + namespace: default + - kind: ServiceAccount + name: csi-node-sa + namespace: default + - kind: ServiceAccount + name: csi-provisioner + namespace: default +roleRef: + kind: ClusterRole + name: e2e-test-privileged-psp + apiGroup: rbac.authorization.k8s.io diff --git a/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpathplugin.yaml b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpathplugin.yaml index 2c66e11eca0..237b759a1c3 100644 --- a/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpathplugin.yaml +++ b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpathplugin.yaml @@ -15,7 +15,7 @@ spec: hostNetwork: true containers: - name: driver-registrar - image: 
gcr.io/gke-release/csi-driver-registrar:v1.0.0-gke.0 + image: gcr.io/gke-release/csi-driver-registrar:v1.0.1-gke.0 args: - --v=5 - --csi-address=/csi/csi.sock diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 64aa7ac7207..36a63193c9e 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -22,7 +22,6 @@ import ( "fmt" "os" "path" - "syscall" "time" "k8s.io/api/core/v1" @@ -34,6 +33,7 @@ import ( clientset "k8s.io/client-go/kubernetes" coreclientset "k8s.io/client-go/kubernetes/typed/core/v1" nodeutil "k8s.io/kubernetes/pkg/api/v1/node" + "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" @@ -97,8 +97,11 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete BeforeEach(func() { By("Calculate Lookback duration") var err error - nodeTime, bootTime, err = getNodeTime() + + nodeTime = time.Now() + bootTime, err = util.GetBootTime() Expect(err).To(BeNil()) + // Set lookback duration longer than node up time. // Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds. lookback = nodeTime.Sub(bootTime) + time.Hour @@ -387,24 +390,6 @@ func injectLog(file string, timestamp time.Time, log string, num int) error { return nil } -// getNodeTime gets node boot time and current time. -func getNodeTime() (time.Time, time.Time, error) { - // Get node current time. - nodeTime := time.Now() - - // Get system uptime. - var info syscall.Sysinfo_t - if err := syscall.Sysinfo(&info); err != nil { - return time.Time{}, time.Time{}, err - } - // Get node boot time. NOTE that because we get node current time before uptime, the boot time - // calculated will be a little earlier than the real boot time. This won't affect the correctness - // of the test result. 
- bootTime := nodeTime.Add(-time.Duration(info.Uptime) * time.Second) - - return nodeTime, bootTime, nil -} - // verifyEvents verifies there are num specific events generated func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int, reason, message string) error { events, err := e.List(options) diff --git a/test/e2e_node/services/kubelet.go b/test/e2e_node/services/kubelet.go index 276dfbe36fa..71487e753d6 100644 --- a/test/e2e_node/services/kubelet.go +++ b/test/e2e_node/services/kubelet.go @@ -108,7 +108,7 @@ func (e *E2EServices) startKubelet() (*server, error) { klog.Info("Starting kubelet") // set feature gates so we can check which features are enabled and pass the appropriate flags - utilfeature.DefaultFeatureGate.SetFromMap(framework.TestContext.FeatureGates) + utilfeature.DefaultMutableFeatureGate.SetFromMap(framework.TestContext.FeatureGates) // Build kubeconfig kubeconfigPath, err := createKubeconfigCWD() diff --git a/test/e2e_node/services/services.go b/test/e2e_node/services/services.go index 58ac3534ada..51cccdaa393 100644 --- a/test/e2e_node/services/services.go +++ b/test/e2e_node/services/services.go @@ -109,7 +109,7 @@ func (e *E2EServices) Stop() { func RunE2EServices(t *testing.T) { // Populate global DefaultFeatureGate with value from TestContext.FeatureGates. // This way, statically-linked components see the same feature gate config as the test context. 
- utilfeature.DefaultFeatureGate.SetFromMap(framework.TestContext.FeatureGates) + utilfeature.DefaultMutableFeatureGate.SetFromMap(framework.TestContext.FeatureGates) e := newE2EServices() if err := e.run(t); err != nil { klog.Fatalf("Failed to run e2e services: %v", err) diff --git a/test/images/Makefile b/test/images/Makefile index 089c2696de9..b600a2d9d17 100644 --- a/test/images/Makefile +++ b/test/images/Makefile @@ -17,7 +17,7 @@ include ../../hack/make-rules/Makefile.manifest REGISTRY ?= gcr.io/kubernetes-e2e-test-images GOARM=7 QEMUVERSION=v2.9.1 -GOLANG_VERSION=1.11.2 +GOLANG_VERSION=1.11.3 export ifndef WHAT diff --git a/test/images/net/nat/closewait.go b/test/images/net/nat/closewait.go index d06aab79d21..cb32391cde3 100644 --- a/test/images/net/nat/closewait.go +++ b/test/images/net/nat/closewait.go @@ -40,7 +40,7 @@ import ( // connection assigned here. var leakedConnection *net.TCPConn -// Server JSON options. +// CloseWaitServerOptions holds server JSON options. type CloseWaitServerOptions struct { // Address to bind for the test LocalAddr string @@ -110,7 +110,7 @@ func (server *closeWaitServer) Run(logger *log.Logger, rawOptions interface{}) e return nil } -// Client JSON options +// CloseWaitClientOptions holds client JSON options. type CloseWaitClientOptions struct { // RemoteAddr of the server to connect to. 
RemoteAddr string diff --git a/test/images/netexec/netexec.go b/test/images/netexec/netexec.go index e66ae53538b..8bd6f632bc1 100644 --- a/test/images/netexec/netexec.go +++ b/test/images/netexec/netexec.go @@ -80,7 +80,7 @@ func main() { func startHTTPServer(httpPort int) { http.HandleFunc("/", rootHandler) - http.HandleFunc("/clientip", clientIpHandler) + http.HandleFunc("/clientip", clientIPHandler) http.HandleFunc("/echo", echoHandler) http.HandleFunc("/exit", exitHandler) http.HandleFunc("/hostname", hostnameHandler) @@ -104,7 +104,7 @@ func echoHandler(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "%s", r.FormValue("msg")) } -func clientIpHandler(w http.ResponseWriter, r *http.Request) { +func clientIPHandler(w http.ResponseWriter, r *http.Request) { log.Printf("GET /clientip") fmt.Fprintf(w, r.RemoteAddr) } diff --git a/test/images/nettest/nettest.go b/test/images/nettest/nettest.go index 2a70587ef0f..8336d7cfeb8 100644 --- a/test/images/nettest/nettest.go +++ b/test/images/nettest/nettest.go @@ -133,7 +133,7 @@ func (s *State) serveWrite(w http.ResponseWriter, r *http.Request) { if s.Received == nil { s.Received = map[string]int{} } - s.Received[wp.Source] += 1 + s.Received[wp.Source]++ } s.appendErr(json.NewEncoder(w).Encode(&WriteResp{Hostname: s.Hostname})) } @@ -164,7 +164,7 @@ func (s *State) appendSuccessfulSend(toHostname string) { if s.Sent == nil { s.Sent = map[string]int{} } - s.Sent[toHostname] += 1 + s.Sent[toHostname]++ } var ( diff --git a/test/images/no-snat-test-proxy/main.go b/test/images/no-snat-test-proxy/main.go index a133343b794..b7f95876588 100644 --- a/test/images/no-snat-test-proxy/main.go +++ b/test/images/no-snat-test-proxy/main.go @@ -30,22 +30,22 @@ import ( // This Pod's /checknosnat takes `target` and `ips` arguments, and queries {target}/checknosnat?ips={ips} -type MasqTestProxy struct { +type masqTestProxy struct { Port string } -func NewMasqTestProxy() *MasqTestProxy { - return &MasqTestProxy{ +func 
newMasqTestProxy() *masqTestProxy { + return &masqTestProxy{ Port: "31235", } } -func (m *MasqTestProxy) AddFlags(fs *pflag.FlagSet) { +func (m *masqTestProxy) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&m.Port, "port", m.Port, "The port to serve /checknosnat endpoint on.") } func main() { - m := NewMasqTestProxy() + m := newMasqTestProxy() m.AddFlags(pflag.CommandLine) flag.InitFlags() @@ -58,7 +58,7 @@ func main() { } } -func (m *MasqTestProxy) Run() error { +func (m *masqTestProxy) Run() error { // register handler http.HandleFunc("/checknosnat", checknosnat) diff --git a/test/images/no-snat-test/main.go b/test/images/no-snat-test/main.go index 40f16eb36a8..b5279852d4f 100644 --- a/test/images/no-snat-test/main.go +++ b/test/images/no-snat-test/main.go @@ -34,22 +34,22 @@ import ( // pip = this pod's ip // nip = this node's ip -type MasqTester struct { +type masqTester struct { Port string } -func NewMasqTester() *MasqTester { - return &MasqTester{ +func newMasqTester() *masqTester { + return &masqTester{ Port: "8080", } } -func (m *MasqTester) AddFlags(fs *pflag.FlagSet) { +func (m *masqTester) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&m.Port, "port", m.Port, "The port to serve /checknosnat and /whoami endpoints on.") } func main() { - m := NewMasqTester() + m := newMasqTester() m.AddFlags(pflag.CommandLine) flag.InitFlags() @@ -62,7 +62,7 @@ func main() { } } -func (m *MasqTester) Run() error { +func (m *masqTester) Run() error { // pip is the current pod's IP and nip is the current node's IP // pull the pip and nip out of the env pip, ok := os.LookupEnv("POD_IP") @@ -145,9 +145,8 @@ func check(ip string, pip string, nip string) error { if rip != pip { if rip == nip { return fmt.Errorf("Returned ip %q != my Pod ip %q, == my Node ip %q - SNAT", rip, pip, nip) - } else { - return fmt.Errorf("Returned ip %q != my Pod ip %q or my Node ip %q - SNAT to unexpected ip (possible SNAT through unexpected interface on the way into another node)", rip, pip, nip) } + 
return fmt.Errorf("Returned ip %q != my Pod ip %q or my Node ip %q - SNAT to unexpected ip (possible SNAT through unexpected interface on the way into another node)", rip, pip, nip) } return nil } diff --git a/test/images/resource-consumer/common/common.go b/test/images/resource-consumer/common/common.go index a1423146f80..ef5b9c8d447 100644 --- a/test/images/resource-consumer/common/common.go +++ b/test/images/resource-consumer/common/common.go @@ -16,6 +16,7 @@ limitations under the License. package common +// Constants related to Prometheus metrics. const ( ConsumeCPUAddress = "/ConsumeCPU" ConsumeMemAddress = "/ConsumeMem" diff --git a/test/images/resource-consumer/controller/controller.go b/test/images/resource-consumer/controller/controller.go index f0afbd52a01..32f9763f7f6 100644 --- a/test/images/resource-consumer/controller/controller.go +++ b/test/images/resource-consumer/controller/controller.go @@ -25,7 +25,7 @@ import ( "strconv" "sync" - . "k8s.io/kubernetes/test/images/resource-consumer/common" + "k8s.io/kubernetes/test/images/resource-consumer/common" ) var port = flag.Int("port", 8080, "Port number.") @@ -35,23 +35,23 @@ var consumerServiceNamespace = flag.String("consumer-service-namespace", "defaul func main() { flag.Parse() - mgr := NewController() + mgr := newController() log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", *port), mgr)) } -type Controller struct { +type controller struct { responseWriterLock sync.Mutex waitGroup sync.WaitGroup } -func NewController() *Controller { - c := &Controller{} +func newController() *controller { + c := &controller{} return c } -func (handler *Controller) ServeHTTP(w http.ResponseWriter, req *http.Request) { +func (c *controller) ServeHTTP(w http.ResponseWriter, req *http.Request) { if req.Method != "POST" { - http.Error(w, BadRequest, http.StatusBadRequest) + http.Error(w, common.BadRequest, http.StatusBadRequest) return } // parsing POST request data and URL data @@ -60,30 +60,30 @@ func (handler 
*Controller) ServeHTTP(w http.ResponseWriter, req *http.Request) { return } // handle consumeCPU - if req.URL.Path == ConsumeCPUAddress { - handler.handleConsumeCPU(w, req.Form) + if req.URL.Path == common.ConsumeCPUAddress { + c.handleConsumeCPU(w, req.Form) return } // handle consumeMem - if req.URL.Path == ConsumeMemAddress { - handler.handleConsumeMem(w, req.Form) + if req.URL.Path == common.ConsumeMemAddress { + c.handleConsumeMem(w, req.Form) return } // handle bumpMetric - if req.URL.Path == BumpMetricAddress { - handler.handleBumpMetric(w, req.Form) + if req.URL.Path == common.BumpMetricAddress { + c.handleBumpMetric(w, req.Form) return } - http.Error(w, UnknownFunction, http.StatusNotFound) + http.Error(w, common.UnknownFunction, http.StatusNotFound) } -func (handler *Controller) handleConsumeCPU(w http.ResponseWriter, query url.Values) { +func (c *controller) handleConsumeCPU(w http.ResponseWriter, query url.Values) { // getting string data for consumeCPU - durationSecString := query.Get(DurationSecQuery) - millicoresString := query.Get(MillicoresQuery) - requestSizeInMillicoresString := query.Get(RequestSizeInMillicoresQuery) + durationSecString := query.Get(common.DurationSecQuery) + millicoresString := query.Get(common.MillicoresQuery) + requestSizeInMillicoresString := query.Get(common.RequestSizeInMillicoresQuery) if durationSecString == "" || millicoresString == "" || requestSizeInMillicoresString == "" { - http.Error(w, NotGivenFunctionArgument, http.StatusBadRequest) + http.Error(w, common.NotGivenFunctionArgument, http.StatusBadRequest) return } @@ -92,7 +92,7 @@ func (handler *Controller) handleConsumeCPU(w http.ResponseWriter, query url.Val millicores, millicoresError := strconv.Atoi(millicoresString) requestSizeInMillicores, requestSizeInMillicoresError := strconv.Atoi(requestSizeInMillicoresString) if durationSecError != nil || millicoresError != nil || requestSizeInMillicoresError != nil || requestSizeInMillicores <= 0 { - http.Error(w, 
IncorrectFunctionArgument, http.StatusBadRequest) + http.Error(w, common.IncorrectFunctionArgument, http.StatusBadRequest) return } @@ -100,23 +100,23 @@ func (handler *Controller) handleConsumeCPU(w http.ResponseWriter, query url.Val rest := millicores - count*requestSizeInMillicores fmt.Fprintf(w, "RC manager: sending %v requests to consume %v millicores each and 1 request to consume %v millicores\n", count, requestSizeInMillicores, rest) if count > 0 { - handler.waitGroup.Add(count) - handler.sendConsumeCPURequests(w, count, requestSizeInMillicores, durationSec) + c.waitGroup.Add(count) + c.sendConsumeCPURequests(w, count, requestSizeInMillicores, durationSec) } if rest > 0 { - handler.waitGroup.Add(1) - go handler.sendOneConsumeCPURequest(w, rest, durationSec) + c.waitGroup.Add(1) + go c.sendOneConsumeCPURequest(w, rest, durationSec) } - handler.waitGroup.Wait() + c.waitGroup.Wait() } -func (handler *Controller) handleConsumeMem(w http.ResponseWriter, query url.Values) { +func (c *controller) handleConsumeMem(w http.ResponseWriter, query url.Values) { // getting string data for consumeMem - durationSecString := query.Get(DurationSecQuery) - megabytesString := query.Get(MegabytesQuery) - requestSizeInMegabytesString := query.Get(RequestSizeInMegabytesQuery) + durationSecString := query.Get(common.DurationSecQuery) + megabytesString := query.Get(common.MegabytesQuery) + requestSizeInMegabytesString := query.Get(common.RequestSizeInMegabytesQuery) if durationSecString == "" || megabytesString == "" || requestSizeInMegabytesString == "" { - http.Error(w, NotGivenFunctionArgument, http.StatusBadRequest) + http.Error(w, common.NotGivenFunctionArgument, http.StatusBadRequest) return } @@ -125,7 +125,7 @@ func (handler *Controller) handleConsumeMem(w http.ResponseWriter, query url.Val megabytes, megabytesError := strconv.Atoi(megabytesString) requestSizeInMegabytes, requestSizeInMegabytesError := strconv.Atoi(requestSizeInMegabytesString) if durationSecError != nil || 
megabytesError != nil || requestSizeInMegabytesError != nil || requestSizeInMegabytes <= 0 { - http.Error(w, IncorrectFunctionArgument, http.StatusBadRequest) + http.Error(w, common.IncorrectFunctionArgument, http.StatusBadRequest) return } @@ -133,24 +133,24 @@ func (handler *Controller) handleConsumeMem(w http.ResponseWriter, query url.Val rest := megabytes - count*requestSizeInMegabytes fmt.Fprintf(w, "RC manager: sending %v requests to consume %v MB each and 1 request to consume %v MB\n", count, requestSizeInMegabytes, rest) if count > 0 { - handler.waitGroup.Add(count) - handler.sendConsumeMemRequests(w, count, requestSizeInMegabytes, durationSec) + c.waitGroup.Add(count) + c.sendConsumeMemRequests(w, count, requestSizeInMegabytes, durationSec) } if rest > 0 { - handler.waitGroup.Add(1) - go handler.sendOneConsumeMemRequest(w, rest, durationSec) + c.waitGroup.Add(1) + go c.sendOneConsumeMemRequest(w, rest, durationSec) } - handler.waitGroup.Wait() + c.waitGroup.Wait() } -func (handler *Controller) handleBumpMetric(w http.ResponseWriter, query url.Values) { +func (c *controller) handleBumpMetric(w http.ResponseWriter, query url.Values) { // getting string data for handleBumpMetric - metric := query.Get(MetricNameQuery) - deltaString := query.Get(DeltaQuery) - durationSecString := query.Get(DurationSecQuery) - requestSizeCustomMetricString := query.Get(RequestSizeCustomMetricQuery) + metric := query.Get(common.MetricNameQuery) + deltaString := query.Get(common.DeltaQuery) + durationSecString := query.Get(common.DurationSecQuery) + requestSizeCustomMetricString := query.Get(common.RequestSizeCustomMetricQuery) if durationSecString == "" || metric == "" || deltaString == "" || requestSizeCustomMetricString == "" { - http.Error(w, NotGivenFunctionArgument, http.StatusBadRequest) + http.Error(w, common.NotGivenFunctionArgument, http.StatusBadRequest) return } @@ -159,7 +159,7 @@ func (handler *Controller) handleBumpMetric(w http.ResponseWriter, query url.Val delta, 
deltaError := strconv.Atoi(deltaString) requestSizeCustomMetric, requestSizeCustomMetricError := strconv.Atoi(requestSizeCustomMetricString) if durationSecError != nil || deltaError != nil || requestSizeCustomMetricError != nil || requestSizeCustomMetric <= 0 { - http.Error(w, IncorrectFunctionArgument, http.StatusBadRequest) + http.Error(w, common.IncorrectFunctionArgument, http.StatusBadRequest) return } @@ -167,31 +167,31 @@ func (handler *Controller) handleBumpMetric(w http.ResponseWriter, query url.Val rest := delta - count*requestSizeCustomMetric fmt.Fprintf(w, "RC manager: sending %v requests to bump custom metric by %v each and 1 request to bump by %v\n", count, requestSizeCustomMetric, rest) if count > 0 { - handler.waitGroup.Add(count) - handler.sendConsumeCustomMetric(w, metric, count, requestSizeCustomMetric, durationSec) + c.waitGroup.Add(count) + c.sendConsumeCustomMetric(w, metric, count, requestSizeCustomMetric, durationSec) } if rest > 0 { - handler.waitGroup.Add(1) - go handler.sendOneConsumeCustomMetric(w, metric, rest, durationSec) + c.waitGroup.Add(1) + go c.sendOneConsumeCustomMetric(w, metric, rest, durationSec) } - handler.waitGroup.Wait() + c.waitGroup.Wait() } -func (manager *Controller) sendConsumeCPURequests(w http.ResponseWriter, requests, millicores, durationSec int) { +func (c *controller) sendConsumeCPURequests(w http.ResponseWriter, requests, millicores, durationSec int) { for i := 0; i < requests; i++ { - go manager.sendOneConsumeCPURequest(w, millicores, durationSec) + go c.sendOneConsumeCPURequest(w, millicores, durationSec) } } -func (manager *Controller) sendConsumeMemRequests(w http.ResponseWriter, requests, megabytes, durationSec int) { +func (c *controller) sendConsumeMemRequests(w http.ResponseWriter, requests, megabytes, durationSec int) { for i := 0; i < requests; i++ { - go manager.sendOneConsumeMemRequest(w, megabytes, durationSec) + go c.sendOneConsumeMemRequest(w, megabytes, durationSec) } } -func (manager 
*Controller) sendConsumeCustomMetric(w http.ResponseWriter, metric string, requests, delta, durationSec int) { +func (c *controller) sendConsumeCustomMetric(w http.ResponseWriter, metric string, requests, delta, durationSec int) { for i := 0; i < requests; i++ { - go manager.sendOneConsumeCustomMetric(w, metric, delta, durationSec) + go c.sendOneConsumeCustomMetric(w, metric, delta, durationSec) } } @@ -200,10 +200,10 @@ func createConsumerURL(suffix string) string { } // sendOneConsumeCPURequest sends POST request for cpu consumption -func (c *Controller) sendOneConsumeCPURequest(w http.ResponseWriter, millicores int, durationSec int) { +func (c *controller) sendOneConsumeCPURequest(w http.ResponseWriter, millicores int, durationSec int) { defer c.waitGroup.Done() - query := createConsumerURL(ConsumeCPUAddress) - _, err := http.PostForm(query, url.Values{MillicoresQuery: {strconv.Itoa(millicores)}, DurationSecQuery: {strconv.Itoa(durationSec)}}) + query := createConsumerURL(common.ConsumeCPUAddress) + _, err := http.PostForm(query, url.Values{common.MillicoresQuery: {strconv.Itoa(millicores)}, common.DurationSecQuery: {strconv.Itoa(durationSec)}}) c.responseWriterLock.Lock() defer c.responseWriterLock.Unlock() if err != nil { @@ -214,10 +214,10 @@ func (c *Controller) sendOneConsumeCPURequest(w http.ResponseWriter, millicores } // sendOneConsumeMemRequest sends POST request for memory consumption -func (c *Controller) sendOneConsumeMemRequest(w http.ResponseWriter, megabytes int, durationSec int) { +func (c *controller) sendOneConsumeMemRequest(w http.ResponseWriter, megabytes int, durationSec int) { defer c.waitGroup.Done() - query := createConsumerURL(ConsumeMemAddress) - _, err := http.PostForm(query, url.Values{MegabytesQuery: {strconv.Itoa(megabytes)}, DurationSecQuery: {strconv.Itoa(durationSec)}}) + query := createConsumerURL(common.ConsumeMemAddress) + _, err := http.PostForm(query, url.Values{common.MegabytesQuery: {strconv.Itoa(megabytes)}, 
common.DurationSecQuery: {strconv.Itoa(durationSec)}}) c.responseWriterLock.Lock() defer c.responseWriterLock.Unlock() if err != nil { @@ -228,11 +228,11 @@ func (c *Controller) sendOneConsumeMemRequest(w http.ResponseWriter, megabytes i } // sendOneConsumeCustomMetric sends POST request for custom metric consumption -func (c *Controller) sendOneConsumeCustomMetric(w http.ResponseWriter, customMetricName string, delta int, durationSec int) { +func (c *controller) sendOneConsumeCustomMetric(w http.ResponseWriter, customMetricName string, delta int, durationSec int) { defer c.waitGroup.Done() - query := createConsumerURL(BumpMetricAddress) + query := createConsumerURL(common.BumpMetricAddress) _, err := http.PostForm(query, - url.Values{MetricNameQuery: {customMetricName}, DurationSecQuery: {strconv.Itoa(durationSec)}, DeltaQuery: {strconv.Itoa(delta)}}) + url.Values{common.MetricNameQuery: {customMetricName}, common.DurationSecQuery: {strconv.Itoa(durationSec)}, common.DeltaQuery: {strconv.Itoa(delta)}}) c.responseWriterLock.Lock() defer c.responseWriterLock.Unlock() if err != nil { diff --git a/test/images/resource-consumer/resource_consumer_handler.go b/test/images/resource-consumer/resource_consumer_handler.go index ffcbb04f549..d67de9a211b 100644 --- a/test/images/resource-consumer/resource_consumer_handler.go +++ b/test/images/resource-consumer/resource_consumer_handler.go @@ -24,26 +24,28 @@ import ( "sync" "time" - . "k8s.io/kubernetes/test/images/resource-consumer/common" + "k8s.io/kubernetes/test/images/resource-consumer/common" ) +// ResourceConsumerHandler holds metrics for a resource consumer. type ResourceConsumerHandler struct { metrics map[string]float64 metricsLock sync.Mutex } +// NewResourceConsumerHandler creates and initializes a ResourceConsumerHandler to defaults. 
func NewResourceConsumerHandler() *ResourceConsumerHandler { return &ResourceConsumerHandler{metrics: map[string]float64{}} } func (handler *ResourceConsumerHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { // handle exposing metrics in Prometheus format (both GET & POST) - if req.URL.Path == MetricsAddress { + if req.URL.Path == common.MetricsAddress { handler.handleMetrics(w) return } if req.Method != "POST" { - http.Error(w, BadRequest, http.StatusBadRequest) + http.Error(w, common.BadRequest, http.StatusBadRequest) return } // parsing POST request data and URL data @@ -52,34 +54,34 @@ func (handler *ResourceConsumerHandler) ServeHTTP(w http.ResponseWriter, req *ht return } // handle consumeCPU - if req.URL.Path == ConsumeCPUAddress { + if req.URL.Path == common.ConsumeCPUAddress { handler.handleConsumeCPU(w, req.Form) return } // handle consumeMem - if req.URL.Path == ConsumeMemAddress { + if req.URL.Path == common.ConsumeMemAddress { handler.handleConsumeMem(w, req.Form) return } // handle getCurrentStatus - if req.URL.Path == GetCurrentStatusAddress { + if req.URL.Path == common.GetCurrentStatusAddress { handler.handleGetCurrentStatus(w) return } // handle bumpMetric - if req.URL.Path == BumpMetricAddress { + if req.URL.Path == common.BumpMetricAddress { handler.handleBumpMetric(w, req.Form) return } - http.Error(w, fmt.Sprintf("%s: %s", UnknownFunction, req.URL.Path), http.StatusNotFound) + http.Error(w, fmt.Sprintf("%s: %s", common.UnknownFunction, req.URL.Path), http.StatusNotFound) } func (handler *ResourceConsumerHandler) handleConsumeCPU(w http.ResponseWriter, query url.Values) { // getting string data for consumeCPU - durationSecString := query.Get(DurationSecQuery) - millicoresString := query.Get(MillicoresQuery) + durationSecString := query.Get(common.DurationSecQuery) + millicoresString := query.Get(common.MillicoresQuery) if durationSecString == "" || millicoresString == "" { - http.Error(w, NotGivenFunctionArgument, 
http.StatusBadRequest) + http.Error(w, common.NotGivenFunctionArgument, http.StatusBadRequest) return } @@ -87,22 +89,22 @@ func (handler *ResourceConsumerHandler) handleConsumeCPU(w http.ResponseWriter, durationSec, durationSecError := strconv.Atoi(durationSecString) millicores, millicoresError := strconv.Atoi(millicoresString) if durationSecError != nil || millicoresError != nil { - http.Error(w, IncorrectFunctionArgument, http.StatusBadRequest) + http.Error(w, common.IncorrectFunctionArgument, http.StatusBadRequest) return } go ConsumeCPU(millicores, durationSec) - fmt.Fprintln(w, ConsumeCPUAddress[1:]) - fmt.Fprintln(w, millicores, MillicoresQuery) - fmt.Fprintln(w, durationSec, DurationSecQuery) + fmt.Fprintln(w, common.ConsumeCPUAddress[1:]) + fmt.Fprintln(w, millicores, common.MillicoresQuery) + fmt.Fprintln(w, durationSec, common.DurationSecQuery) } func (handler *ResourceConsumerHandler) handleConsumeMem(w http.ResponseWriter, query url.Values) { // getting string data for consumeMem - durationSecString := query.Get(DurationSecQuery) - megabytesString := query.Get(MegabytesQuery) + durationSecString := query.Get(common.DurationSecQuery) + megabytesString := query.Get(common.MegabytesQuery) if durationSecString == "" || megabytesString == "" { - http.Error(w, NotGivenFunctionArgument, http.StatusBadRequest) + http.Error(w, common.NotGivenFunctionArgument, http.StatusBadRequest) return } @@ -110,20 +112,20 @@ func (handler *ResourceConsumerHandler) handleConsumeMem(w http.ResponseWriter, durationSec, durationSecError := strconv.Atoi(durationSecString) megabytes, megabytesError := strconv.Atoi(megabytesString) if durationSecError != nil || megabytesError != nil { - http.Error(w, IncorrectFunctionArgument, http.StatusBadRequest) + http.Error(w, common.IncorrectFunctionArgument, http.StatusBadRequest) return } go ConsumeMem(megabytes, durationSec) - fmt.Fprintln(w, ConsumeMemAddress[1:]) - fmt.Fprintln(w, megabytes, MegabytesQuery) - fmt.Fprintln(w, 
durationSec, DurationSecQuery) + fmt.Fprintln(w, common.ConsumeMemAddress[1:]) + fmt.Fprintln(w, megabytes, common.MegabytesQuery) + fmt.Fprintln(w, durationSec, common.DurationSecQuery) } func (handler *ResourceConsumerHandler) handleGetCurrentStatus(w http.ResponseWriter) { GetCurrentStatus() fmt.Fprintln(w, "Warning: not implemented!") - fmt.Fprint(w, GetCurrentStatusAddress[1:]) + fmt.Fprint(w, common.GetCurrentStatusAddress[1:]) } func (handler *ResourceConsumerHandler) handleMetrics(w http.ResponseWriter) { @@ -154,11 +156,11 @@ func (handler *ResourceConsumerHandler) bumpMetric(metric string, delta float64, func (handler *ResourceConsumerHandler) handleBumpMetric(w http.ResponseWriter, query url.Values) { // getting string data for handleBumpMetric - metric := query.Get(MetricNameQuery) - deltaString := query.Get(DeltaQuery) - durationSecString := query.Get(DurationSecQuery) + metric := query.Get(common.MetricNameQuery) + deltaString := query.Get(common.DeltaQuery) + durationSecString := query.Get(common.DurationSecQuery) if durationSecString == "" || metric == "" || deltaString == "" { - http.Error(w, NotGivenFunctionArgument, http.StatusBadRequest) + http.Error(w, common.NotGivenFunctionArgument, http.StatusBadRequest) return } @@ -166,13 +168,13 @@ func (handler *ResourceConsumerHandler) handleBumpMetric(w http.ResponseWriter, durationSec, durationSecError := strconv.Atoi(durationSecString) delta, deltaError := strconv.ParseFloat(deltaString, 64) if durationSecError != nil || deltaError != nil { - http.Error(w, IncorrectFunctionArgument, http.StatusBadRequest) + http.Error(w, common.IncorrectFunctionArgument, http.StatusBadRequest) return } go handler.bumpMetric(metric, delta, time.Duration(durationSec)*time.Second) - fmt.Fprintln(w, BumpMetricAddress[1:]) - fmt.Fprintln(w, metric, MetricNameQuery) - fmt.Fprintln(w, delta, DeltaQuery) - fmt.Fprintln(w, durationSec, DurationSecQuery) + fmt.Fprintln(w, common.BumpMetricAddress[1:]) + fmt.Fprintln(w, metric, 
common.MetricNameQuery) + fmt.Fprintln(w, delta, common.DeltaQuery) + fmt.Fprintln(w, durationSec, common.DurationSecQuery) } diff --git a/test/images/resource-consumer/utils.go b/test/images/resource-consumer/utils.go index 2e11b2ee9f3..64ea8838c72 100644 --- a/test/images/resource-consumer/utils.go +++ b/test/images/resource-consumer/utils.go @@ -28,6 +28,7 @@ const ( consumeMemBinary = "stress" ) +// ConsumeCPU consumes a given number of millicores for the specified duration. func ConsumeCPU(millicores int, durationSec int) { log.Printf("ConsumeCPU millicores: %v, durationSec: %v", millicores, durationSec) // creating new consume cpu process @@ -37,6 +38,7 @@ func ConsumeCPU(millicores int, durationSec int) { consumeCPU.Run() } +// ConsumeMem consumes a given number of megabytes for the specified duration. func ConsumeMem(megabytes int, durationSec int) { log.Printf("ConsumeMem megabytes: %v, durationSec: %v", megabytes, durationSec) megabytesString := strconv.Itoa(megabytes) + "M" @@ -46,6 +48,7 @@ func ConsumeMem(megabytes int, durationSec int) { consumeMem.Run() } +// GetCurrentStatus prints out a no-op. 
func GetCurrentStatus() { log.Printf("GetCurrentStatus") // not implemented diff --git a/test/images/serve-hostname/VERSION b/test/images/serve-hostname/VERSION index 9459d4ba2a0..5625e59da88 100644 --- a/test/images/serve-hostname/VERSION +++ b/test/images/serve-hostname/VERSION @@ -1 +1 @@ -1.1 +1.2 diff --git a/test/images/serve-hostname/serve_hostname.go b/test/images/serve-hostname/serve_hostname.go index 21793fbbf9f..f6d7507e1f4 100644 --- a/test/images/serve-hostname/serve_hostname.go +++ b/test/images/serve-hostname/serve_hostname.go @@ -30,10 +30,11 @@ import ( ) var ( - doTCP = flag.Bool("tcp", false, "Serve raw over TCP.") - doUDP = flag.Bool("udp", false, "Serve raw over UDP.") - doHTTP = flag.Bool("http", true, "Serve HTTP.") - port = flag.Int("port", 9376, "Port number.") + doTCP = flag.Bool("tcp", false, "Serve raw over TCP.") + doUDP = flag.Bool("udp", false, "Serve raw over UDP.") + doHTTP = flag.Bool("http", true, "Serve HTTP.") + doClose = flag.Bool("close", false, "Close connection per each HTTP request") + port = flag.Int("port", 9376, "Port number.") ) func main() { @@ -88,6 +89,12 @@ func main() { if *doHTTP { http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { log.Printf("HTTP request from %s", r.RemoteAddr) + + if *doClose { + // Add this header to force to close the connection after serving the request. 
+ w.Header().Add("Connection", "close") + } + fmt.Fprintf(w, "%s", hostname) }) go func() { diff --git a/test/integration/apiserver/BUILD b/test/integration/apiserver/BUILD index ce40df99c5e..585cd2918c2 100644 --- a/test/integration/apiserver/BUILD +++ b/test/integration/apiserver/BUILD @@ -43,6 +43,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/features:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/test/integration/apiserver/apiserver_test.go b/test/integration/apiserver/apiserver_test.go index 37606dcd13f..3fe3f27ff25 100644 --- a/test/integration/apiserver/apiserver_test.go +++ b/test/integration/apiserver/apiserver_test.go @@ -32,8 +32,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - genericfeatures "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/pager" @@ -170,9 +171,7 @@ func Test202StatusCode(t *testing.T) { } func TestAPIListChunking(t *testing.T) { - if err := utilfeature.DefaultFeatureGate.Set(string(genericfeatures.APIListChunking) + "=true"); err != nil { - t.Fatal(err) - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)() s, clientSet, closeFn := setup(t) defer closeFn() diff --git a/test/integration/daemonset/BUILD b/test/integration/daemonset/BUILD index 
10aad679212..8814d8e5048 100644 --- a/test/integration/daemonset/BUILD +++ b/test/integration/daemonset/BUILD @@ -33,6 +33,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go index c9450c3c50b..f73cbe93a9b 100644 --- a/test/integration/daemonset/daemonset_test.go +++ b/test/integration/daemonset/daemonset_test.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" appstyped "k8s.io/client-go/kubernetes/typed/apps/v1" @@ -485,21 +486,12 @@ func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) { for _, fg := range featureGates() { - func() { - enabled := utilfeature.DefaultFeatureGate.Enabled(fg) - defer func() { - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)); err != nil { - t.Fatalf("Failed to set FeatureGate %v to %t", fg, enabled) - } - }() - - for _, f := range []bool{true, false} { - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)); err != nil { - t.Fatalf("Failed to set FeatureGate %v to %t", fg, f) - } + for _, f := range []bool{true, false} { + func() { + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, 
fg, f)() t.Run(fmt.Sprintf("%v (%t)", fg, f), tf) - } - }() + }() + } } } @@ -704,23 +696,10 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { }) } -func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) { - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil { - t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err) - } -} - // When ScheduleDaemonSetPods is disabled, DaemonSets should not launch onto nodes with insufficient capacity. // Look for TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled, we don't need this test anymore. func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) { - enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) - // Rollback feature gate. - defer func() { - if enabled { - setFeatureGate(t, features.ScheduleDaemonSetPods, true) - } - }() - setFeatureGate(t, features.ScheduleDaemonSetPods, false) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)() forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { server, closeFn, dc, informers, clientset := setup(t) defer closeFn() @@ -761,17 +740,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) { // feature is enabled, the DaemonSet should create Pods for all the nodes regardless of available resource // on the nodes, and kube-scheduler should not schedule Pods onto the nodes with insufficient resource. 
func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) { - enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) - defer func() { - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", - features.ScheduleDaemonSetPods, enabled)); err != nil { - t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, enabled) - } - }() - - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true)); err != nil { - t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, true) - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, true)() forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { server, closeFn, dc, informers, clientset := setup(t) @@ -1012,16 +981,7 @@ func TestTaintedNode(t *testing.T) { // TestUnschedulableNodeDaemonDoesLaunchPod tests that the DaemonSet Pods can still be scheduled // to the Unschedulable nodes when TaintNodesByCondition are enabled. 
func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) { - enabledTaint := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) - defer func() { - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", - features.TaintNodesByCondition, enabledTaint)); err != nil { - t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, enabledTaint) - } - }() - if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TaintNodesByCondition, true)); err != nil { - t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, true) - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)() forEachFeatureGate(t, func(t *testing.T) { forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { diff --git a/test/integration/master/transformation_testcase.go b/test/integration/master/transformation_testcase.go index b5f4eb63179..3778b514cff 100644 --- a/test/integration/master/transformation_testcase.go +++ b/test/integration/master/transformation_testcase.go @@ -164,7 +164,7 @@ func (e *transformTest) getRawSecretFromETCD() ([]byte, error) { func (e *transformTest) getEncryptionOptions() []string { if e.transformerConfig != "" { - return []string{"--experimental-encryption-provider-config", path.Join(e.configDir, encryptionConfigFileName)} + return []string{"--encryption-provider-config", path.Join(e.configDir, encryptionConfigFileName)} } return nil diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index 4ef611c9967..5bed16953a2 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -12,6 +12,7 @@ go_test( srcs = [ "extender_test.go", "main_test.go", + "plugin_test.go", "predicates_test.go", "preemption_test.go", "priorities_test.go", @@ -35,8 +36,9 @@ go_test( "//pkg/scheduler/algorithmprovider:go_default_library", 
"//pkg/scheduler/api:go_default_library", "//pkg/scheduler/apis/config:go_default_library", - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/factory:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/scheduler/plugins/v1alpha1:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//plugin/pkg/admission/podtolerationrestriction:go_default_library", @@ -54,6 +56,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", @@ -95,6 +98,7 @@ go_library( "//pkg/scheduler/algorithmprovider:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/factory:go_default_library", + "//pkg/scheduler/plugins/v1alpha1:go_default_library", "//pkg/util/taints:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", diff --git a/test/integration/scheduler/plugin_test.go b/test/integration/scheduler/plugin_test.go new file mode 100644 index 00000000000..1fdf513d01b --- /dev/null +++ b/test/integration/scheduler/plugin_test.go @@ -0,0 +1,269 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "fmt" + "testing" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1" +) + +// TesterPlugin is an example plugin that is executed at multiple extension points. +// This plugin is stateful. Its fields are set directly by tests at initialization +// and it changes its state when it is executed. +type TesterPlugin struct { + numReserveCalled int + numPrebindCalled int + failReserve bool + failPrebind bool + rejectPrebind bool +} + +var _ = plugins.ReservePlugin(&TesterPlugin{}) +var _ = plugins.PrebindPlugin(&TesterPlugin{}) + +// Name returns name of the plugin. +func (tp *TesterPlugin) Name() string { + return "tester-plugin" +} + +// Reserve is a test function that returns an error or nil, depending on the +// value of "failReserve". +func (tp *TesterPlugin) Reserve(ps plugins.PluginSet, pod *v1.Pod, nodeName string) error { + tp.numReserveCalled++ + if tp.failReserve { + return fmt.Errorf("injecting failure for pod %v", pod.Name) + } + return nil +} + +// Prebind is a test function that returns (true, nil) or errors for testing. +func (tp *TesterPlugin) Prebind(ps plugins.PluginSet, pod *v1.Pod, nodeName string) (bool, error) { + var err error = nil + tp.numPrebindCalled++ + if tp.failPrebind { + err = fmt.Errorf("injecting failure for pod %v", pod.Name) + } + if tp.rejectPrebind { + return false, err + } + return true, err +} + +// TestPluginSet is a plugin set used for testing purposes. 
+type TestPluginSet struct { + data *plugins.PluginData + reservePlugins []plugins.ReservePlugin + prebindPlugins []plugins.PrebindPlugin +} + +var _ = plugins.PluginSet(&TestPluginSet{}) + +// ReservePlugins returns a slice of default reserve plugins. +func (r *TestPluginSet) ReservePlugins() []plugins.ReservePlugin { + return r.reservePlugins +} + +// PrebindPlugins returns a slice of default prebind plugins. +func (r *TestPluginSet) PrebindPlugins() []plugins.PrebindPlugin { + return r.prebindPlugins +} + +// Data returns a pointer to PluginData. +func (r *TestPluginSet) Data() *plugins.PluginData { + return r.data +} + +// TestReservePlugin tests invocation of reserve plugins. +func TestReservePlugin(t *testing.T) { + // Create a plugin set for testing. Register only a reserve plugin. + testerPlugin := &TesterPlugin{} + testPluginSet := &TestPluginSet{ + data: &plugins.PluginData{ + Ctx: plugins.NewPluginContext(), + }, + reservePlugins: []plugins.ReservePlugin{testerPlugin}, + } + + // Create the master and the scheduler with the test plugin set. + context := initTestSchedulerWithOptions(t, + initTestMaster(t, "reserve-plugin", nil), + false, nil, testPluginSet, false, true, time.Second) + defer cleanupTest(t, context) + + cs := context.clientSet + // Add a few nodes. + _, err := createNodes(cs, "test-node", nil, 2) + if err != nil { + t.Fatalf("Cannot create nodes: %v", err) + } + + for _, fail := range []bool{false, true} { + testerPlugin.failReserve = fail + // Create a best effort pod. + pod, err := createPausePod(cs, + initPausePod(cs, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) + if err != nil { + t.Errorf("Error while creating a test pod: %v", err) + } + + if fail { + if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(cs, pod.Namespace, pod.Name)); err != nil { + t.Errorf("Didn't expected the pod to be scheduled. 
error: %v", err) + } + } else { + if err = waitForPodToSchedule(cs, pod); err != nil { + t.Errorf("Expected the pod to be scheduled. error: %v", err) + } + } + + if testerPlugin.numReserveCalled == 0 { + t.Errorf("Expected the reserve plugin to be called.") + } + + cleanupPods(cs, t, []*v1.Pod{pod}) + } +} + +// TestPrebindPlugin tests invocation of prebind plugins. +func TestPrebindPlugin(t *testing.T) { + // Create a plugin set for testing. Register only a prebind plugin. + testerPlugin := &TesterPlugin{} + testPluginSet := &TestPluginSet{ + data: &plugins.PluginData{ + Ctx: plugins.NewPluginContext(), + }, + prebindPlugins: []plugins.PrebindPlugin{testerPlugin}, + } + + // Create the master and the scheduler with the test plugin set. + context := initTestSchedulerWithOptions(t, + initTestMaster(t, "prebind-plugin", nil), + false, nil, testPluginSet, false, true, time.Second) + defer cleanupTest(t, context) + + cs := context.clientSet + // Add a few nodes. + _, err := createNodes(cs, "test-node", nil, 2) + if err != nil { + t.Fatalf("Cannot create nodes: %v", err) + } + + tests := []struct { + fail bool + reject bool + }{ + { + fail: false, + reject: false, + }, + { + fail: true, + reject: false, + }, + { + fail: false, + reject: true, + }, + { + fail: true, + reject: true, + }, + } + + for i, test := range tests { + testerPlugin.failPrebind = test.fail + testerPlugin.rejectPrebind = test.reject + // Create a best effort pod. + pod, err := createPausePod(cs, + initPausePod(cs, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) + if err != nil { + t.Errorf("Error while creating a test pod: %v", err) + } + + if test.fail { + if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(cs, pod.Namespace, pod.Name)); err != nil { + t.Errorf("test #%v: Expected a scheduling error, but didn't get it. 
error: %v", i, err) + } + } else { + if test.reject { + if err = waitForPodUnschedulable(cs, pod); err != nil { + t.Errorf("test #%v: Didn't expected the pod to be scheduled. error: %v", i, err) + } + } else { + if err = waitForPodToSchedule(cs, pod); err != nil { + t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err) + } + } + } + + if testerPlugin.numPrebindCalled == 0 { + t.Errorf("Expected the prebind plugin to be called.") + } + + cleanupPods(cs, t, []*v1.Pod{pod}) + } +} + +// TestContextCleanup tests that data inserted in the pluginContext is removed +// after a scheduling cycle is over. +func TestContextCleanup(t *testing.T) { + // Create a plugin set for testing. + testerPlugin := &TesterPlugin{} + testPluginSet := &TestPluginSet{ + data: &plugins.PluginData{ + Ctx: plugins.NewPluginContext(), + }, + reservePlugins: []plugins.ReservePlugin{testerPlugin}, + prebindPlugins: []plugins.PrebindPlugin{testerPlugin}, + } + + // Create the master and the scheduler with the test plugin set. + context := initTestSchedulerWithOptions(t, + initTestMaster(t, "plugin-context-cleanup", nil), + false, nil, testPluginSet, false, true, time.Second) + defer cleanupTest(t, context) + + cs := context.clientSet + // Add a few nodes. + _, err := createNodes(cs, "test-node", nil, 2) + if err != nil { + t.Fatalf("Cannot create nodes: %v", err) + } + + // Insert something in the plugin context. + testPluginSet.Data().Ctx.Write("test", "foo") + + // Create and schedule a best effort pod. + pod, err := runPausePod(cs, + initPausePod(cs, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) + if err != nil { + t.Errorf("Error while creating or scheduling a test pod: %v", err) + } + + // Make sure the data inserted in the plugin context is removed. + _, err = testPluginSet.Data().Ctx.Read("test") + if err == nil || err.Error() != plugins.NotFound { + t.Errorf("Expected the plugin context to be cleaned up after a scheduling cycle. 
error: %v", err) + } + + cleanupPods(cs, t, []*v1.Pod{pod}) +} diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go index a1fc044a78c..ffa0e4237ea 100644 --- a/test/integration/scheduler/preemption_test.go +++ b/test/integration/scheduler/preemption_test.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/features" _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" @@ -64,7 +65,7 @@ func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error { // TestPreemption tests a few preemption scenarios. func TestPreemption(t *testing.T) { // Enable PodPriority feature gate. - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() // Initialize scheduler. context := initTest(t, "preemption") defer cleanupTest(t, context) @@ -292,7 +293,7 @@ func TestPreemption(t *testing.T) { // TestDisablePreemption tests disable pod preemption of scheduler works as expected. func TestDisablePreemption(t *testing.T) { // Enable PodPriority feature gate. - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() // Initialize scheduler, and disable preemption. context := initTestDisablePreemption(t, "disable-preemption") defer cleanupTest(t, context) @@ -394,7 +395,7 @@ func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace // after preemption and while the higher priority pods is not scheduled yet. func TestPreemptionStarvation(t *testing.T) { // Enable PodPriority feature gate. 
- utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() // Initialize scheduler. context := initTest(t, "preemption") defer cleanupTest(t, context) @@ -501,7 +502,7 @@ func TestPreemptionStarvation(t *testing.T) { // node name of the medium priority pod is cleared. func TestNominatedNodeCleanUp(t *testing.T) { // Enable PodPriority feature gate. - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() // Initialize scheduler. context := initTest(t, "preemption") defer cleanupTest(t, context) @@ -615,7 +616,7 @@ func addPodConditionReady(pod *v1.Pod) { // TestPDBInPreemption tests PodDisruptionBudget support in preemption. func TestPDBInPreemption(t *testing.T) { // Enable PodPriority feature gate. - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)() // Initialize scheduler. 
context := initTest(t, "preemption-pdb") defer cleanupTest(t, context) diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index 3f7938bb5ff..e67457b3d57 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -44,8 +44,8 @@ import ( _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" "k8s.io/kubernetes/pkg/scheduler/factory" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/test/integration/framework" ) @@ -56,19 +56,19 @@ type nodeStateManager struct { makeUnSchedulable nodeMutationFunc } -func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return true, nil, nil } -func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return true, nil, nil } -func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { +func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { return []schedulerapi.HostPriority{}, nil } -func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { +func PriorityTwo(pod *v1.Pod, nodeNameToInfo 
map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { return []schedulerapi.HostPriority{}, nil } diff --git a/test/integration/scheduler/taint_test.go b/test/integration/scheduler/taint_test.go index e6964760ec4..bdcd0b4a08b 100644 --- a/test/integration/scheduler/taint_test.go +++ b/test/integration/scheduler/taint_test.go @@ -28,10 +28,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/controller/nodelifecycle" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction" @@ -61,14 +63,8 @@ func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod { // TestTaintNodeByCondition tests related cases for TaintNodeByCondition feature. func TestTaintNodeByCondition(t *testing.T) { - enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition") - defer func() { - if !enabled { - utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False") - } - }() // Enable TaintNodeByCondition - utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True") + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)() // Build PodToleration Admission. 
admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{}) @@ -145,24 +141,12 @@ func TestTaintNodeByCondition(t *testing.T) { Effect: v1.TaintEffectNoSchedule, } - unreachableToleration := v1.Toleration{ - Key: schedulerapi.TaintNodeUnreachable, - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoSchedule, - } - unschedulableToleration := v1.Toleration{ Key: schedulerapi.TaintNodeUnschedulable, Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule, } - outOfDiskToleration := v1.Toleration{ - Key: schedulerapi.TaintNodeOutOfDisk, - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoSchedule, - } - memoryPressureToleration := v1.Toleration{ Key: schedulerapi.TaintNodeMemoryPressure, Operator: v1.TolerationOpExists, @@ -240,46 +224,6 @@ func TestTaintNodeByCondition(t *testing.T) { }, }, }, - { - name: "unreachable node", - existingTaints: []v1.Taint{ - { - Key: schedulerapi.TaintNodeUnreachable, - Effect: v1.TaintEffectNoSchedule, - }, - }, - nodeConditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - Status: v1.ConditionUnknown, // node status is "Unknown" - }, - }, - expectedTaints: []v1.Taint{ - { - Key: schedulerapi.TaintNodeUnreachable, - Effect: v1.TaintEffectNoSchedule, - }, - }, - pods: []podCase{ - { - pod: bestEffortPod, - fits: false, - }, - { - pod: burstablePod, - fits: false, - }, - { - pod: guaranteePod, - fits: false, - }, - { - pod: bestEffortPod, - tolerations: []v1.Toleration{unreachableToleration}, - fits: true, - }, - }, - }, { name: "unschedulable node", unschedulable: true, // node.spec.unschedulable = true @@ -315,50 +259,6 @@ func TestTaintNodeByCondition(t *testing.T) { }, }, }, - { - name: "out of disk node", - nodeConditions: []v1.NodeCondition{ - { - Type: v1.NodeOutOfDisk, - Status: v1.ConditionTrue, - }, - { - Type: v1.NodeReady, - Status: v1.ConditionTrue, - }, - }, - expectedTaints: []v1.Taint{ - { - Key: schedulerapi.TaintNodeOutOfDisk, - Effect: 
v1.TaintEffectNoSchedule, - }, - }, - // In OutOfDisk condition, only pods with toleration can be scheduled. - pods: []podCase{ - { - pod: bestEffortPod, - fits: false, - }, - { - pod: burstablePod, - fits: false, - }, - { - pod: guaranteePod, - fits: false, - }, - { - pod: bestEffortPod, - tolerations: []v1.Toleration{outOfDiskToleration}, - fits: true, - }, - { - pod: bestEffortPod, - tolerations: []v1.Toleration{diskPressureToleration}, - fits: false, - }, - }, - }, { name: "memory pressure node", nodeConditions: []v1.NodeCondition{ diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index 71875405ee6..f993d8342bf 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -51,6 +51,7 @@ import ( _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/pkg/scheduler/factory" + plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1" taintutils "k8s.io/kubernetes/pkg/util/taints" "k8s.io/kubernetes/test/integration/framework" imageutils "k8s.io/kubernetes/test/utils/image" @@ -148,7 +149,7 @@ func initTestScheduler( ) *TestContext { // Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority // feature gate is enabled at the same time. 
- return initTestSchedulerWithOptions(t, context, setPodInformer, policy, false, true, time.Second) + return initTestSchedulerWithOptions(t, context, setPodInformer, policy, nil, false, true, time.Second) } // initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default @@ -158,6 +159,7 @@ func initTestSchedulerWithOptions( context *TestContext, setPodInformer bool, policy *schedulerapi.Policy, + pluginSet plugins.PluginSet, disablePreemption bool, disableEquivalenceCache bool, resyncPeriod time.Duration, @@ -205,6 +207,11 @@ func initTestSchedulerWithOptions( controller.WaitForCacheSync("scheduler", context.schedulerConfig.StopEverything, podInformer.Informer().HasSynced) } + // Set pluginSet if provided. DefaultPluginSet is used if this is not specified. + if pluginSet != nil { + context.schedulerConfig.PluginSet = pluginSet + } + eventBroadcaster := record.NewBroadcaster() context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder( legacyscheme.Scheme, @@ -257,7 +264,7 @@ func initTest(t *testing.T, nsPrefix string) *TestContext { // configuration but with pod preemption disabled. func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext { return initTestSchedulerWithOptions( - t, initTestMaster(t, nsPrefix, nil), true, nil, true, true, time.Second) + t, initTestMaster(t, nsPrefix, nil), true, nil, nil, true, true, time.Second) } // cleanupTest deletes the scheduler and the test namespace. It should be called @@ -605,6 +612,25 @@ func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait. } } +// podSchedulingError returns a condition function that returns true if the given pod +// gets unschedulable status for reasons other than "Unschedulable". The scheduler +// records such reasons in case of error. 
+func podSchedulingError(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { + return func() (bool, error) { + pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return false, nil + } + if err != nil { + // This could be a connection error so we want to retry. + return false, nil + } + _, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) + return cond != nil && cond.Status == v1.ConditionFalse && + cond.Reason != v1.PodReasonUnschedulable, nil + } +} + // waitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns // an error if it does not scheduled within the given timeout. func waitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error { diff --git a/test/integration/scheduler/volume_binding_test.go b/test/integration/scheduler/volume_binding_test.go index 80cc3d85a45..7fa38f7b753 100644 --- a/test/integration/scheduler/volume_binding_test.go +++ b/test/integration/scheduler/volume_binding_test.go @@ -36,10 +36,12 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" persistentvolumeoptions "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -95,11 +97,9 @@ type testPVC struct { } func TestVolumeBinding(t *testing.T) { - features := map[string]bool{ - "VolumeScheduling": true, - "PersistentLocalVolumes": true, - } - config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, true) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, 
utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)() + config := setupCluster(t, "volume-scheduling-", 2, 0, 0, true) defer config.teardown() cases := map[string]struct { @@ -268,11 +268,9 @@ func TestVolumeBinding(t *testing.T) { // TestVolumeBindingRescheduling tests scheduler will retry scheduling when needed. func TestVolumeBindingRescheduling(t *testing.T) { - features := map[string]bool{ - "VolumeScheduling": true, - "PersistentLocalVolumes": true, - } - config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, true) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)() + config := setupCluster(t, "volume-scheduling-", 2, 0, 0, true) defer config.teardown() storageClassName := "local-storage" @@ -389,7 +387,7 @@ func TestVolumeBindingRescheduling(t *testing.T) { } } -// TestVolumeBindingStress creates pods, each with unbound PVCs. +// TestVolumeBindingStress creates pods, each with unbound or prebound PVCs. // PVs are precreated. 
func TestVolumeBindingStress(t *testing.T) { testVolumeBindingStress(t, 0, false, 0) @@ -414,11 +412,9 @@ func TestVolumeBindingDynamicStressSlow(t *testing.T) { } func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, dynamic bool, provisionDelaySeconds int) { - features := map[string]bool{ - "VolumeScheduling": true, - "PersistentLocalVolumes": true, - } - config := setupCluster(t, "volume-binding-stress-", 1, features, schedulerResyncPeriod, provisionDelaySeconds, true) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)() + config := setupCluster(t, "volume-binding-stress-", 1, schedulerResyncPeriod, provisionDelaySeconds, true) defer config.teardown() // Set max volume limit to the number of PVCs the test will create @@ -441,16 +437,31 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, pvs := []*v1.PersistentVolume{} pvcs := []*v1.PersistentVolumeClaim{} for i := 0; i < podLimit*volsPerPod; i++ { + var ( + pv *v1.PersistentVolume + pvc *v1.PersistentVolumeClaim + pvName = fmt.Sprintf("pv-stress-%v", i) + pvcName = fmt.Sprintf("pvc-stress-%v", i) + ) // Don't create pvs for dynamic provisioning test if !dynamic { - pv := makePV(fmt.Sprintf("pv-stress-%v", i), *scName, "", "", node1) + if rand.Int()%2 == 0 { + // static unbound pvs + pv = makePV(pvName, *scName, "", "", node1) + } else { + // static prebound pvs + pv = makePV(pvName, classImmediate, pvcName, config.ns, node1) + } if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } pvs = append(pvs, pv) } - - pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, scName, "") + if pv != nil && pv.Spec.ClaimRef != nil && pv.Spec.ClaimRef.Name == pvcName { + pvc = 
makePVC(pvcName, config.ns, &classImmediate, pv.Name) + } else { + pvc = makePVC(pvcName, config.ns, scName, "") + } if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } @@ -491,12 +502,10 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, } func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, numPVsFirstNode int) { - features := map[string]bool{ - "VolumeScheduling": true, - "PersistentLocalVolumes": true, - } + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)() // TODO: disable equivalence cache until kubernetes/kubernetes#67680 is fixed - config := setupCluster(t, "volume-pod-affinity-", numNodes, features, 0, 0, true) + config := setupCluster(t, "volume-pod-affinity-", numNodes, 0, 0, true) defer config.teardown() pods := []*v1.Pod{} @@ -621,11 +630,9 @@ func TestVolumeBindingWithAffinity(t *testing.T) { } func TestPVAffinityConflict(t *testing.T) { - features := map[string]bool{ - "VolumeScheduling": true, - "PersistentLocalVolumes": true, - } - config := setupCluster(t, "volume-scheduling-", 3, features, 0, 0, true) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)() + config := setupCluster(t, "volume-scheduling-", 3, 0, 0, true) defer config.teardown() pv := makePV("local-pv", classImmediate, "", "", node1) @@ -684,11 +691,9 @@ func TestPVAffinityConflict(t *testing.T) { } func TestVolumeProvision(t *testing.T) { - features := map[string]bool{ - "VolumeScheduling": true, - 
"PersistentLocalVolumes": true, - } - config := setupCluster(t, "volume-scheduling", 1, features, 0, 0, true) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)() + config := setupCluster(t, "volume-scheduling", 1, 0, 0, true) defer config.teardown() cases := map[string]struct { @@ -825,15 +830,8 @@ func TestVolumeProvision(t *testing.T) { // selectedNode annotation from a claim to reschedule volume provision // on provision failure. func TestRescheduleProvisioning(t *testing.T) { - features := map[string]bool{ - "VolumeScheduling": true, - } - oldFeatures := make(map[string]bool, len(features)) - for feature := range features { - oldFeatures[feature] = utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(feature)) - } // Set feature gates - utilfeature.DefaultFeatureGate.SetFromMap(features) + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)() controllerCh := make(chan struct{}) context := initTestMaster(t, "reschedule-volume-provision", nil) @@ -846,8 +844,6 @@ func TestRescheduleProvisioning(t *testing.T) { deleteTestObjects(clientset, ns, nil) context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) context.closeFn() - // Restore feature gates - utilfeature.DefaultFeatureGate.SetFromMap(oldFeatures) }() ctrl, informerFactory, err := initPVController(context, 0) @@ -893,15 +889,8 @@ func TestRescheduleProvisioning(t *testing.T) { } } -func setupCluster(t *testing.T, nsName string, numberOfNodes int, features map[string]bool, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig { - oldFeatures := make(map[string]bool, len(features)) - for feature := range features { - oldFeatures[feature] = 
utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(feature)) - } - // Set feature gates - utilfeature.DefaultFeatureGate.SetFromMap(features) - - context := initTestSchedulerWithOptions(t, initTestMaster(t, nsName, nil), false, nil, false, disableEquivalenceCache, resyncPeriod) +func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig { + context := initTestSchedulerWithOptions(t, initTestMaster(t, nsName, nil), false, nil, nil, false, disableEquivalenceCache, resyncPeriod) clientset := context.clientSet ns := context.ns.Name @@ -938,8 +927,6 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, features map[s teardown: func() { deleteTestObjects(clientset, ns, nil) cleanupTest(t, context) - // Restore feature gates - utilfeature.DefaultFeatureGate.SetFromMap(oldFeatures) }, } } diff --git a/test/integration/scheduler_perf/scheduler_bench_test.go b/test/integration/scheduler_perf/scheduler_bench_test.go index 85af9567234..818eb9a9819 100644 --- a/test/integration/scheduler_perf/scheduler_bench_test.go +++ b/test/integration/scheduler_perf/scheduler_bench_test.go @@ -43,6 +43,7 @@ func BenchmarkScheduling(b *testing.B) { {nodes: 100, existingPods: 1000, minPods: 100}, {nodes: 1000, existingPods: 0, minPods: 100}, {nodes: 1000, existingPods: 1000, minPods: 100}, + {nodes: 5000, existingPods: 1000, minPods: 1000}, } setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc1") testStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc2") @@ -62,6 +63,7 @@ func BenchmarkSchedulingPodAntiAffinity(b *testing.B) { {nodes: 500, existingPods: 250, minPods: 250}, {nodes: 500, existingPods: 5000, minPods: 250}, {nodes: 1000, existingPods: 1000, minPods: 500}, + {nodes: 5000, existingPods: 1000, minPods: 1000}, } // The setup strategy creates pods with no affinity rules. 
setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup") @@ -86,6 +88,7 @@ func BenchmarkSchedulingPodAffinity(b *testing.B) { {nodes: 500, existingPods: 250, minPods: 250}, {nodes: 500, existingPods: 5000, minPods: 250}, {nodes: 1000, existingPods: 1000, minPods: 500}, + {nodes: 5000, existingPods: 1000, minPods: 1000}, } // The setup strategy creates pods with no affinity rules. setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup") @@ -112,6 +115,7 @@ func BenchmarkSchedulingNodeAffinity(b *testing.B) { {nodes: 500, existingPods: 250, minPods: 250}, {nodes: 500, existingPods: 5000, minPods: 250}, {nodes: 1000, existingPods: 1000, minPods: 500}, + {nodes: 5000, existingPods: 1000, minPods: 1000}, } // The setup strategy creates pods with no affinity rules. setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup") diff --git a/test/kubemark/resources/start-kubemark-master.sh b/test/kubemark/resources/start-kubemark-master.sh index 4f0aae5e258..9b813b19a18 100755 --- a/test/kubemark/resources/start-kubemark-master.sh +++ b/test/kubemark/resources/start-kubemark-master.sh @@ -489,11 +489,11 @@ function compute-etcd-events-params { function compute-kube-apiserver-params { local params="${APISERVER_TEST_ARGS:-}" params+=" --insecure-bind-address=0.0.0.0" + params+=" --etcd-servers=${ETCD_SERVERS:-http://127.0.0.1:2379}" if [[ -z "${ETCD_SERVERS:-}" ]]; then - params+=" --etcd-servers=http://127.0.0.1:2379" - params+=" --etcd-servers-overrides=/events#${EVENT_STORE_URL}" - else - params+=" --etcd-servers=${ETCD_SERVERS}" + params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-/events#${EVENT_STORE_URL}}" + elif [[ -n "${ETCD_SERVERS_OVERRIDES:-}" ]]; then + params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-}" fi params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert" params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key" @@ -704,9 +704,11 @@ readonly 
audit_policy_file="/etc/audit_policy.config" # Start kubelet as a supervisord process and master components as pods. start-kubelet -start-kubemaster-component "etcd" -if [ "${EVENT_STORE_IP:-}" == "127.0.0.1" ]; then - start-kubemaster-component "etcd-events" +if [[ -z "${ETCD_SERVERS:-}" ]]; then + start-kubemaster-component "etcd" + if [ "${EVENT_STORE_IP:-}" == "127.0.0.1" ]; then + start-kubemaster-component "etcd-events" + fi fi start-kubemaster-component "kube-apiserver" start-kubemaster-component "kube-controller-manager" diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh index 94507ebce61..ddb17833f4c 100755 --- a/test/kubemark/start-kubemark.sh +++ b/test/kubemark/start-kubemark.sh @@ -64,7 +64,7 @@ SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-}" EVENT_PD="${EVENT_PD:-}" # Etcd related variables. -ETCD_IMAGE="${ETCD_IMAGE:-3.2.24-1}" +ETCD_IMAGE="${ETCD_IMAGE:-3.3.10-0}" ETCD_VERSION="${ETCD_VERSION:-}" # Controller-manager related variables. 
@@ -80,6 +80,8 @@ SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-}" APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-}" STORAGE_MEDIA_TYPE="${STORAGE_MEDIA_TYPE:-}" STORAGE_BACKEND="${STORAGE_BACKEND:-etcd3}" +ETCD_SERVERS="${ETCD_SERVERS:-}" +ETCD_SERVERS_OVERRIDES="${ETCD_SERVERS_OVERRIDES:-}" ETCD_COMPACTION_INTERVAL_SEC="${ETCD_COMPACTION_INTERVAL_SEC:-}" RUNTIME_CONFIG="${RUNTIME_CONFIG:-}" NUM_NODES="${NUM_NODES:-}" diff --git a/test/test_owners.csv b/test/test_owners.csv index c8684e2ceca..16ba34c28db 100644 --- a/test/test_owners.csv +++ b/test/test_owners.csv @@ -763,7 +763,7 @@ k8s.io/kubernetes/pkg/scheduler/algorithm/priorities,fgrzadkowski,0, k8s.io/kubernetes/pkg/scheduler/algorithmprovider,fgrzadkowski,0, k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults,fgrzadkowski,0, k8s.io/kubernetes/pkg/scheduler/api/validation,fgrzadkowski,0, -k8s.io/kubernetes/pkg/scheduler/cache,fgrzadkowski,0, +k8s.io/kubernetes/pkg/scheduler/nodeinfo,fgrzadkowski,0, k8s.io/kubernetes/pkg/scheduler/core,madhusudancs,1, k8s.io/kubernetes/pkg/scheduler/factory,fgrzadkowski,0, k8s.io/kubernetes/pkg/scheduler/util,wojtek-t,1, diff --git a/vendor/BUILD b/vendor/BUILD index e60e2512ce9..216936e881b 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -389,7 +389,6 @@ filegroup( "//vendor/golang.org/x/crypto/poly1305:all-srcs", "//vendor/golang.org/x/crypto/salsa20/salsa:all-srcs", "//vendor/golang.org/x/crypto/ssh:all-srcs", - "//vendor/golang.org/x/exp/inotify:all-srcs", "//vendor/golang.org/x/net/context:all-srcs", "//vendor/golang.org/x/net/html:all-srcs", "//vendor/golang.org/x/net/http2:all-srcs", diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock index 3719afe8e0d..c8a9fbb3871 100644 --- a/vendor/github.com/json-iterator/go/Gopkg.lock +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -1,12 +1,6 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
-[[projects]] - name = "github.com/json-iterator/go" - packages = ["."] - revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4" - version = "1.1.3" - [[projects]] name = "github.com/modern-go/concurrent" packages = ["."] @@ -16,12 +10,12 @@ [[projects]] name = "github.com/modern-go/reflect2" packages = ["."] - revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f" - version = "1.0.0" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "56a0b9e9e61d2bc8af5e1b68537401b7f4d60805eda3d107058f3171aa5cf793" + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml index 5801ffa1e98..313a0f887b6 100644 --- a/vendor/github.com/json-iterator/go/Gopkg.toml +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -23,4 +23,4 @@ ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com [[constraint]] name = "github.com/modern-go/reflect2" - version = "1.0.0" + version = "1.0.1" diff --git a/vendor/golang.org/x/exp/AUTHORS b/vendor/golang.org/x/exp/AUTHORS deleted file mode 100644 index 15167cd746c..00000000000 --- a/vendor/golang.org/x/exp/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/exp/CONTRIBUTORS b/vendor/golang.org/x/exp/CONTRIBUTORS deleted file mode 100644 index 1c4577e9680..00000000000 --- a/vendor/golang.org/x/exp/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE deleted file mode 100644 index 6a66aea5eaf..00000000000 --- a/vendor/golang.org/x/exp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS deleted file mode 100644 index 733099041f8..00000000000 --- a/vendor/golang.org/x/exp/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/inotify/inotify_linux.go b/vendor/golang.org/x/exp/inotify/inotify_linux.go deleted file mode 100644 index 901f308d84a..00000000000 --- a/vendor/golang.org/x/exp/inotify/inotify_linux.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -/* -Package inotify implements a wrapper for the Linux inotify system. - -Example: - watcher, err := inotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - err = watcher.Watch("/tmp") - if err != nil { - log.Fatal(err) - } - for { - select { - case ev := <-watcher.Event: - log.Println("event:", ev) - case err := <-watcher.Error: - log.Println("error:", err) - } - } - -*/ -package inotify - -import ( - "errors" - "fmt" - "os" - "strings" - "sync" - "syscall" - "unsafe" -) - -type Event struct { - Mask uint32 // Mask of events - Cookie uint32 // Unique cookie associating related events (for rename(2)) - Name string // File name (optional) -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -type Watcher struct { - mu sync.Mutex - fd int // File descriptor (as returned by the inotify_init() syscall) - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - Error chan error // Errors are sent on this channel - Event chan *Event // Events are returned on this channel - done chan bool // Channel for sending a "quit message" to the reader goroutine - isClosed bool // Set to true when Close() is first called -} - -// NewWatcher creates and returns a new inotify instance using inotify_init(2) -func NewWatcher() (*Watcher, error) { - fd, errno := syscall.InotifyInit() - if fd == -1 { - return nil, os.NewSyscallError("inotify_init", errno) - } - w := &Watcher{ - fd: fd, - watches: make(map[string]*watch), - paths: make(map[int]string), - Event: make(chan *Event), - Error: make(chan error), - done: make(chan bool, 1), - } - - go w.readEvents() - return w, nil -} - -// Close closes an inotify watcher instance -// It sends a message to the reader goroutine to quit and removes all watches -// associated with the inotify instance -func (w 
*Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - w.done <- true - for path := range w.watches { - w.RemoveWatch(path) - } - - return nil -} - -// AddWatch adds path to the watched file set. -// The flags are interpreted as described in inotify_add_watch(2). -func (w *Watcher) AddWatch(path string, flags uint32) error { - if w.isClosed { - return errors.New("inotify instance already closed") - } - - watchEntry, found := w.watches[path] - if found { - watchEntry.flags |= flags - flags |= syscall.IN_MASK_ADD - } - - w.mu.Lock() // synchronize with readEvents goroutine - - wd, err := syscall.InotifyAddWatch(w.fd, path, flags) - if err != nil { - w.mu.Unlock() - return &os.PathError{ - Op: "inotify_add_watch", - Path: path, - Err: err, - } - } - - if !found { - w.watches[path] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = path - } - w.mu.Unlock() - return nil -} - -// Watch adds path to the watched file set, watching all events. -func (w *Watcher) Watch(path string) error { - return w.AddWatch(path, IN_ALL_EVENTS) -} - -// RemoveWatch removes path from the watched file set. -func (w *Watcher) RemoveWatch(path string) error { - watch, ok := w.watches[path] - if !ok { - return errors.New(fmt.Sprintf("can't remove non-existent inotify watch for: %s", path)) - } - success, errno := syscall.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - return os.NewSyscallError("inotify_rm_watch", errno) - } - delete(w.watches, path) - // Locking here to protect the read from paths in readEvents. 
- w.mu.Lock() - delete(w.paths, int(watch.wd)) - w.mu.Unlock() - return nil -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Event channel -func (w *Watcher) readEvents() { - var buf [syscall.SizeofInotifyEvent * 4096]byte - - for { - n, err := syscall.Read(w.fd, buf[:]) - // See if there is a message on the "done" channel - var done bool - select { - case done = <-w.done: - default: - } - - // If EOF or a "done" message is received - if n == 0 || done { - // The syscall.Close can be slow. Close - // w.Event first. - close(w.Event) - err := syscall.Close(w.fd) - if err != nil { - w.Error <- os.NewSyscallError("close", err) - } - close(w.Error) - return - } - if n < 0 { - w.Error <- os.NewSyscallError("read", err) - continue - } - if n < syscall.SizeofInotifyEvent { - w.Error <- errors.New("inotify: short read in readEvents()") - continue - } - - var offset uint32 = 0 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-syscall.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset])) - event := new(Event) - event.Mask = uint32(raw.Mask) - event.Cookie = uint32(raw.Cookie) - nameLen := uint32(raw.Len) - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - w.mu.Unlock() - if ok { - event.Name = name - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent])) - // The filename is padded with NUL bytes. TrimRight() gets rid of those. 
- event.Name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - // Send the event on the events channel - w.Event <- event - } - // Move to the next event in the buffer - offset += syscall.SizeofInotifyEvent + nameLen - } - } -} - -// String formats the event e in the form -// "filename: 0xEventMask = IN_ACCESS|IN_ATTRIB_|..." -func (e *Event) String() string { - var events string = "" - - m := e.Mask - for _, b := range eventBits { - if m&b.Value == b.Value { - m &^= b.Value - events += "|" + b.Name - } - } - - if m != 0 { - events += fmt.Sprintf("|%#x", m) - } - if len(events) > 0 { - events = " == " + events[1:] - } - - return fmt.Sprintf("%q: %#x%s", e.Name, e.Mask, events) -} - -const ( - // Options for inotify_init() are not exported - // IN_CLOEXEC uint32 = syscall.IN_CLOEXEC - // IN_NONBLOCK uint32 = syscall.IN_NONBLOCK - - // Options for AddWatch - IN_DONT_FOLLOW uint32 = syscall.IN_DONT_FOLLOW - IN_ONESHOT uint32 = syscall.IN_ONESHOT - IN_ONLYDIR uint32 = syscall.IN_ONLYDIR - - // The "IN_MASK_ADD" option is not exported, as AddWatch - // adds it automatically, if there is already a watch for the given path - // IN_MASK_ADD uint32 = syscall.IN_MASK_ADD - - // Events - IN_ACCESS uint32 = syscall.IN_ACCESS - IN_ALL_EVENTS uint32 = syscall.IN_ALL_EVENTS - IN_ATTRIB uint32 = syscall.IN_ATTRIB - IN_CLOSE uint32 = syscall.IN_CLOSE - IN_CLOSE_NOWRITE uint32 = syscall.IN_CLOSE_NOWRITE - IN_CLOSE_WRITE uint32 = syscall.IN_CLOSE_WRITE - IN_CREATE uint32 = syscall.IN_CREATE - IN_DELETE uint32 = syscall.IN_DELETE - IN_DELETE_SELF uint32 = syscall.IN_DELETE_SELF - IN_MODIFY uint32 = syscall.IN_MODIFY - IN_MOVE uint32 = syscall.IN_MOVE - IN_MOVED_FROM uint32 = syscall.IN_MOVED_FROM - IN_MOVED_TO uint32 = syscall.IN_MOVED_TO - IN_MOVE_SELF uint32 = syscall.IN_MOVE_SELF - IN_OPEN uint32 = syscall.IN_OPEN - - // Special events - IN_ISDIR uint32 = syscall.IN_ISDIR - IN_IGNORED uint32 = syscall.IN_IGNORED - IN_Q_OVERFLOW uint32 = syscall.IN_Q_OVERFLOW - 
IN_UNMOUNT uint32 = syscall.IN_UNMOUNT -) - -var eventBits = []struct { - Value uint32 - Name string -}{ - {IN_ACCESS, "IN_ACCESS"}, - {IN_ATTRIB, "IN_ATTRIB"}, - {IN_CLOSE, "IN_CLOSE"}, - {IN_CLOSE_NOWRITE, "IN_CLOSE_NOWRITE"}, - {IN_CLOSE_WRITE, "IN_CLOSE_WRITE"}, - {IN_CREATE, "IN_CREATE"}, - {IN_DELETE, "IN_DELETE"}, - {IN_DELETE_SELF, "IN_DELETE_SELF"}, - {IN_MODIFY, "IN_MODIFY"}, - {IN_MOVE, "IN_MOVE"}, - {IN_MOVED_FROM, "IN_MOVED_FROM"}, - {IN_MOVED_TO, "IN_MOVED_TO"}, - {IN_MOVE_SELF, "IN_MOVE_SELF"}, - {IN_OPEN, "IN_OPEN"}, - {IN_ISDIR, "IN_ISDIR"}, - {IN_IGNORED, "IN_IGNORED"}, - {IN_Q_OVERFLOW, "IN_Q_OVERFLOW"}, - {IN_UNMOUNT, "IN_UNMOUNT"}, -} diff --git a/vendor/k8s.io/utils/clock/clock.go b/vendor/k8s.io/utils/clock/clock.go index 3d53c62b1a5..789c0238c8a 100644 --- a/vendor/k8s.io/utils/clock/clock.go +++ b/vendor/k8s.io/utils/clock/clock.go @@ -44,21 +44,24 @@ func (RealClock) Since(ts time.Time) time.Duration { return time.Since(ts) } -// Same as time.After(d). +// After is the same as time.After(d). 
func (RealClock) After(d time.Duration) <-chan time.Time { return time.After(d) } +// NewTimer is the same as time.NewTimer(d) func (RealClock) NewTimer(d time.Duration) Timer { return &realTimer{ timer: time.NewTimer(d), } } +// Tick is the same as time.Tick(d) func (RealClock) Tick(d time.Duration) <-chan time.Time { return time.Tick(d) } +// Sleep is the same as time.Sleep(d) func (RealClock) Sleep(d time.Duration) { time.Sleep(d) } diff --git a/vendor/k8s.io/utils/exec/exec.go b/vendor/k8s.io/utils/exec/exec.go index 07735d88144..96bec01ca8b 100644 --- a/vendor/k8s.io/utils/exec/exec.go +++ b/vendor/k8s.io/utils/exec/exec.go @@ -60,6 +60,17 @@ type Cmd interface { SetStdin(in io.Reader) SetStdout(out io.Writer) SetStderr(out io.Writer) + SetEnv(env []string) + + // StdoutPipe and StderrPipe for getting the process' Stdout and Stderr as + // Readers + StdoutPipe() (io.ReadCloser, error) + StderrPipe() (io.ReadCloser, error) + + // Start and Wait are for running a process non-blocking + Start() error + Wait() error + // Stops the command by sending SIGTERM. It is not guaranteed the // process will stop before this function returns. If the process is not // responding, an internal timer function will send a SIGKILL to force @@ -121,6 +132,30 @@ func (cmd *cmdWrapper) SetStderr(out io.Writer) { cmd.Stderr = out } +func (cmd *cmdWrapper) SetEnv(env []string) { + cmd.Env = env +} + +func (cmd *cmdWrapper) StdoutPipe() (io.ReadCloser, error) { + r, err := (*osexec.Cmd)(cmd).StdoutPipe() + return r, handleError(err) +} + +func (cmd *cmdWrapper) StderrPipe() (io.ReadCloser, error) { + r, err := (*osexec.Cmd)(cmd).StderrPipe() + return r, handleError(err) +} + +func (cmd *cmdWrapper) Start() error { + err := (*osexec.Cmd)(cmd).Start() + return handleError(err) +} + +func (cmd *cmdWrapper) Wait() error { + err := (*osexec.Cmd)(cmd).Wait() + return handleError(err) +} + // Run is part of the Cmd interface. 
func (cmd *cmdWrapper) Run() error { err := (*osexec.Cmd)(cmd).Run() @@ -206,10 +241,12 @@ func (e CodeExitError) String() string { return e.Err.Error() } +// Exited is to check if the process has finished func (e CodeExitError) Exited() bool { return true } +// ExitStatus is for checking the error code func (e CodeExitError) ExitStatus() int { return e.Code } diff --git a/vendor/k8s.io/utils/exec/testing/fake_exec.go b/vendor/k8s.io/utils/exec/testing/fake_exec.go index 32cbae2523a..66b5de8b31b 100644 --- a/vendor/k8s.io/utils/exec/testing/fake_exec.go +++ b/vendor/k8s.io/utils/exec/testing/fake_exec.go @@ -24,7 +24,7 @@ import ( "k8s.io/utils/exec" ) -// A simple scripted Interface type. +// FakeExec is a simple scripted Interface type. type FakeExec struct { CommandScript []FakeCommandAction CommandCalls int @@ -33,8 +33,10 @@ type FakeExec struct { var _ exec.Interface = &FakeExec{} +// FakeCommandAction is the function to be executed type FakeCommandAction func(cmd string, args ...string) exec.Cmd +// Command is to track the commands that are executed func (fake *FakeExec) Command(cmd string, args ...string) exec.Cmd { if fake.CommandCalls > len(fake.CommandScript)-1 { panic(fmt.Sprintf("ran out of Command() actions. Could not handle command [%d]: %s args: %v", fake.CommandCalls, cmd, args)) @@ -44,15 +46,17 @@ func (fake *FakeExec) Command(cmd string, args ...string) exec.Cmd { return fake.CommandScript[i](cmd, args...) } +// CommandContext wraps arguments into exec.Cmd func (fake *FakeExec) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd { return fake.Command(cmd, args...) } +// LookPath is for finding the path of a file func (fake *FakeExec) LookPath(file string) (string, error) { return fake.LookPathFunc(file) } -// A simple scripted Cmd type. +// FakeCmd is a simple scripted Cmd type. 
type FakeCmd struct { Argv []string CombinedOutputScript []FakeCombinedOutputAction @@ -65,34 +69,84 @@ type FakeCmd struct { Stdin io.Reader Stdout io.Writer Stderr io.Writer + Env []string + StdoutPipeResponse FakeStdIOPipeResponse + StderrPipeResponse FakeStdIOPipeResponse + WaitResponse error + StartResponse error } var _ exec.Cmd = &FakeCmd{} +// InitFakeCmd is for creating a fake exec.Cmd func InitFakeCmd(fake *FakeCmd, cmd string, args ...string) exec.Cmd { fake.Argv = append([]string{cmd}, args...) return fake } +// FakeStdIOPipeResponse holds responses to use as fakes for the StdoutPipe and +// StderrPipe method calls +type FakeStdIOPipeResponse struct { + ReadCloser io.ReadCloser + Error error +} + +// FakeCombinedOutputAction is a function type type FakeCombinedOutputAction func() ([]byte, error) + +// FakeRunAction is a function type type FakeRunAction func() ([]byte, []byte, error) +// SetDir sets the directory func (fake *FakeCmd) SetDir(dir string) { fake.Dirs = append(fake.Dirs, dir) } +// SetStdin sets the stdin func (fake *FakeCmd) SetStdin(in io.Reader) { fake.Stdin = in } +// SetStdout sets the stdout func (fake *FakeCmd) SetStdout(out io.Writer) { fake.Stdout = out } +// SetStderr sets the stderr func (fake *FakeCmd) SetStderr(out io.Writer) { fake.Stderr = out } +// SetEnv sets the environment variables +func (fake *FakeCmd) SetEnv(env []string) { + fake.Env = env +} + +// StdoutPipe returns an injected ReadCloser & error (via StdoutPipeResponse) +// to be able to inject an output stream on Stdout +func (fake *FakeCmd) StdoutPipe() (io.ReadCloser, error) { + return fake.StdoutPipeResponse.ReadCloser, fake.StdoutPipeResponse.Error +} + +// StderrPipe returns an injected ReadCloser & error (via StderrPipeResponse) +// to be able to inject an output stream on Stderr +func (fake *FakeCmd) StderrPipe() (io.ReadCloser, error) { + return fake.StderrPipeResponse.ReadCloser, fake.StderrPipeResponse.Error +} + +// Start mimicks starting the process (in 
the background) and returns the +// injected StartResponse +func (fake *FakeCmd) Start() error { + return fake.StartResponse +} + +// Wait mimicks waiting for the process to exit returns the +// injected WaitResponse +func (fake *FakeCmd) Wait() error { + return fake.WaitResponse +} + +// Run sets runs the command func (fake *FakeCmd) Run() error { if fake.RunCalls > len(fake.RunScript)-1 { panic("ran out of Run() actions") @@ -113,6 +167,7 @@ func (fake *FakeCmd) Run() error { return err } +// CombinedOutput returns the output from the command func (fake *FakeCmd) CombinedOutput() ([]byte, error) { if fake.CombinedOutputCalls > len(fake.CombinedOutputScript)-1 { panic("ran out of CombinedOutput() actions") @@ -126,15 +181,17 @@ func (fake *FakeCmd) CombinedOutput() ([]byte, error) { return fake.CombinedOutputScript[i]() } +// Output is the response from the command func (fake *FakeCmd) Output() ([]byte, error) { return nil, fmt.Errorf("unimplemented") } +// Stop is to stop the process func (fake *FakeCmd) Stop() { // no-op } -// A simple fake ExitError type. +// FakeExitError is a simple fake ExitError type. 
type FakeExitError struct { Status int } @@ -149,10 +206,12 @@ func (fake FakeExitError) Error() string { return fake.String() } +// Exited always returns true func (fake FakeExitError) Exited() bool { return true } +// ExitStatus returns the fake status func (fake FakeExitError) ExitStatus() int { return fake.Status } diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS new file mode 100644 index 00000000000..2f328f4c905 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index 5fbfc6e00fc..a11a540f464 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -74,3 +74,13 @@ func BoolPtr(b bool) *bool { func StringPtr(s string) *string { return &s } + +// Float32Ptr returns a pointer to the passed float32. +func Float32Ptr(i float32) *float32 { + return &i +} + +// Float64Ptr returns a pointer to the passed float64. +func Float64Ptr(i float64) *float64 { + return &i +}