Merge branch 'master' of https://github.com/kubernetes/kubernetes into fix-golint-test/e2e/storage/utils

Nils Domrose 2019-05-28 22:18:42 +02:00
commit cc93ec05e8
270 changed files with 7855 additions and 3329 deletions


@ -1,66 +1,73 @@
<!-- BEGIN MUNGE: GENERATED_TOC --> <!-- BEGIN MUNGE: GENERATED_TOC -->
- [v1.12.8](#v1128) - [v1.12.9](#v1129)
- [Downloads for v1.12.8](#downloads-for-v1128) - [Downloads for v1.12.9](#downloads-for-v1129)
- [Client Binaries](#client-binaries) - [Client Binaries](#client-binaries)
- [Server Binaries](#server-binaries) - [Server Binaries](#server-binaries)
- [Node Binaries](#node-binaries) - [Node Binaries](#node-binaries)
- [Changelog since v1.12.7](#changelog-since-v1127) - [Changelog since v1.12.8](#changelog-since-v1128)
- [Other notable changes](#other-notable-changes) - [Other notable changes](#other-notable-changes)
- [v1.12.7](#v1127) - [v1.12.8](#v1128)
- [Downloads for v1.12.7](#downloads-for-v1127) - [Downloads for v1.12.8](#downloads-for-v1128)
- [Client Binaries](#client-binaries-1) - [Client Binaries](#client-binaries-1)
- [Server Binaries](#server-binaries-1) - [Server Binaries](#server-binaries-1)
- [Node Binaries](#node-binaries-1) - [Node Binaries](#node-binaries-1)
- [Changelog since v1.12.6](#changelog-since-v1126) - [Changelog since v1.12.7](#changelog-since-v1127)
- [Other notable changes](#other-notable-changes-1) - [Other notable changes](#other-notable-changes-1)
- [v1.12.6](#v1126) - [v1.12.7](#v1127)
- [Downloads for v1.12.6](#downloads-for-v1126) - [Downloads for v1.12.7](#downloads-for-v1127)
- [Client Binaries](#client-binaries-2) - [Client Binaries](#client-binaries-2)
- [Server Binaries](#server-binaries-2) - [Server Binaries](#server-binaries-2)
- [Node Binaries](#node-binaries-2) - [Node Binaries](#node-binaries-2)
- [Changelog since v1.12.5](#changelog-since-v1125) - [Changelog since v1.12.6](#changelog-since-v1126)
- [Other notable changes](#other-notable-changes-2) - [Other notable changes](#other-notable-changes-2)
- [v1.12.5](#v1125) - [v1.12.6](#v1126)
- [Downloads for v1.12.5](#downloads-for-v1125) - [Downloads for v1.12.6](#downloads-for-v1126)
- [Client Binaries](#client-binaries-3) - [Client Binaries](#client-binaries-3)
- [Server Binaries](#server-binaries-3) - [Server Binaries](#server-binaries-3)
- [Node Binaries](#node-binaries-3) - [Node Binaries](#node-binaries-3)
- [Changelog since v1.12.4](#changelog-since-v1124) - [Changelog since v1.12.5](#changelog-since-v1125)
- [Other notable changes](#other-notable-changes-3) - [Other notable changes](#other-notable-changes-3)
- [v1.12.4](#v1124) - [v1.12.5](#v1125)
- [Downloads for v1.12.4](#downloads-for-v1124) - [Downloads for v1.12.5](#downloads-for-v1125)
- [Client Binaries](#client-binaries-4) - [Client Binaries](#client-binaries-4)
- [Server Binaries](#server-binaries-4) - [Server Binaries](#server-binaries-4)
- [Node Binaries](#node-binaries-4) - [Node Binaries](#node-binaries-4)
- [Changelog since v1.12.3](#changelog-since-v1123) - [Changelog since v1.12.4](#changelog-since-v1124)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-4) - [Other notable changes](#other-notable-changes-4)
- [v1.12.3](#v1123) - [v1.12.4](#v1124)
- [Downloads for v1.12.3](#downloads-for-v1123) - [Downloads for v1.12.4](#downloads-for-v1124)
- [Client Binaries](#client-binaries-5) - [Client Binaries](#client-binaries-5)
- [Server Binaries](#server-binaries-5) - [Server Binaries](#server-binaries-5)
- [Node Binaries](#node-binaries-5) - [Node Binaries](#node-binaries-5)
- [Changelog since v1.12.2](#changelog-since-v1122) - [Changelog since v1.12.3](#changelog-since-v1123)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-5) - [Other notable changes](#other-notable-changes-5)
- [v1.12.2](#v1122) - [v1.12.3](#v1123)
- [Downloads for v1.12.2](#downloads-for-v1122) - [Downloads for v1.12.3](#downloads-for-v1123)
- [Client Binaries](#client-binaries-6) - [Client Binaries](#client-binaries-6)
- [Server Binaries](#server-binaries-6) - [Server Binaries](#server-binaries-6)
- [Node Binaries](#node-binaries-6) - [Node Binaries](#node-binaries-6)
- [Changelog since v1.12.1](#changelog-since-v1121) - [Changelog since v1.12.2](#changelog-since-v1122)
- [Other notable changes](#other-notable-changes-6) - [Other notable changes](#other-notable-changes-6)
- [v1.12.1](#v1121) - [v1.12.2](#v1122)
- [Downloads for v1.12.1](#downloads-for-v1121) - [Downloads for v1.12.2](#downloads-for-v1122)
- [Client Binaries](#client-binaries-7) - [Client Binaries](#client-binaries-7)
- [Server Binaries](#server-binaries-7) - [Server Binaries](#server-binaries-7)
- [Node Binaries](#node-binaries-7) - [Node Binaries](#node-binaries-7)
- [Changelog since v1.12.0](#changelog-since-v1120) - [Changelog since v1.12.1](#changelog-since-v1121)
- [Other notable changes](#other-notable-changes-7) - [Other notable changes](#other-notable-changes-7)
- [v1.12.0](#v1120) - [v1.12.1](#v1121)
- [Downloads for v1.12.0](#downloads-for-v1120) - [Downloads for v1.12.1](#downloads-for-v1121)
- [Client Binaries](#client-binaries-8) - [Client Binaries](#client-binaries-8)
- [Server Binaries](#server-binaries-8) - [Server Binaries](#server-binaries-8)
- [Node Binaries](#node-binaries-8) - [Node Binaries](#node-binaries-8)
- [Changelog since v1.12.0](#changelog-since-v1120)
- [Other notable changes](#other-notable-changes-8)
- [v1.12.0](#v1120)
- [Downloads for v1.12.0](#downloads-for-v1120)
- [Client Binaries](#client-binaries-9)
- [Server Binaries](#server-binaries-9)
- [Node Binaries](#node-binaries-9)
- [Known Issues](#known-issues) - [Known Issues](#known-issues)
- [Major Themes](#major-themes) - [Major Themes](#major-themes)
- [SIG API Machinery](#sig-api-machinery) - [SIG API Machinery](#sig-api-machinery)
@ -82,7 +89,7 @@
- [Deprecations and removals](#deprecations-and-removals) - [Deprecations and removals](#deprecations-and-removals)
- [New Features](#new-features) - [New Features](#new-features)
- [API Changes](#api-changes) - [API Changes](#api-changes)
- [Other Notable Changes](#other-notable-changes-8) - [Other Notable Changes](#other-notable-changes-9)
- [SIG API Machinery](#sig-api-machinery-1) - [SIG API Machinery](#sig-api-machinery-1)
- [SIG Apps](#sig-apps) - [SIG Apps](#sig-apps)
- [SIG Auth](#sig-auth) - [SIG Auth](#sig-auth)
@ -101,54 +108,126 @@
- [SIG Storage](#sig-storage-1) - [SIG Storage](#sig-storage-1)
- [SIG VMWare](#sig-vmware-1) - [SIG VMWare](#sig-vmware-1)
- [SIG Windows](#sig-windows-1) - [SIG Windows](#sig-windows-1)
- [Other Notable Changes](#other-notable-changes-9) - [Other Notable Changes](#other-notable-changes-10)
- [Bug Fixes](#bug-fixes) - [Bug Fixes](#bug-fixes)
- [Not Very Notable (that is, non-user-facing)](#not-very-notable-that-is-non-user-facing) - [Not Very Notable (that is, non-user-facing)](#not-very-notable-that-is-non-user-facing)
- [External Dependencies](#external-dependencies) - [External Dependencies](#external-dependencies)
- [v1.12.0-rc.2](#v1120-rc2) - [v1.12.0-rc.2](#v1120-rc2)
- [Downloads for v1.12.0-rc.2](#downloads-for-v1120-rc2) - [Downloads for v1.12.0-rc.2](#downloads-for-v1120-rc2)
- [Client Binaries](#client-binaries-9)
- [Server Binaries](#server-binaries-9)
- [Node Binaries](#node-binaries-9)
- [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1)
- [Other notable changes](#other-notable-changes-10)
- [v1.12.0-rc.1](#v1120-rc1)
- [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1)
- [Client Binaries](#client-binaries-10) - [Client Binaries](#client-binaries-10)
- [Server Binaries](#server-binaries-10) - [Server Binaries](#server-binaries-10)
- [Node Binaries](#node-binaries-10) - [Node Binaries](#node-binaries-10)
- [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2) - [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1)
- [Action Required](#action-required-2)
- [Other notable changes](#other-notable-changes-11) - [Other notable changes](#other-notable-changes-11)
- [v1.12.0-beta.2](#v1120-beta2) - [v1.12.0-rc.1](#v1120-rc1)
- [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) - [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1)
- [Client Binaries](#client-binaries-11) - [Client Binaries](#client-binaries-11)
- [Server Binaries](#server-binaries-11) - [Server Binaries](#server-binaries-11)
- [Node Binaries](#node-binaries-11) - [Node Binaries](#node-binaries-11)
- [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) - [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2)
- [Action Required](#action-required-3) - [Action Required](#action-required-2)
- [Other notable changes](#other-notable-changes-12) - [Other notable changes](#other-notable-changes-12)
- [v1.12.0-beta.1](#v1120-beta1) - [v1.12.0-beta.2](#v1120-beta2)
- [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2)
- [Client Binaries](#client-binaries-12) - [Client Binaries](#client-binaries-12)
- [Server Binaries](#server-binaries-12) - [Server Binaries](#server-binaries-12)
- [Node Binaries](#node-binaries-12) - [Node Binaries](#node-binaries-12)
- [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1)
- [Action Required](#action-required-4) - [Action Required](#action-required-3)
- [Other notable changes](#other-notable-changes-13) - [Other notable changes](#other-notable-changes-13)
- [v1.12.0-alpha.1](#v1120-alpha1) - [v1.12.0-beta.1](#v1120-beta1)
- [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1)
- [Client Binaries](#client-binaries-13) - [Client Binaries](#client-binaries-13)
- [Server Binaries](#server-binaries-13) - [Server Binaries](#server-binaries-13)
- [Node Binaries](#node-binaries-13) - [Node Binaries](#node-binaries-13)
- [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1)
- [Action Required](#action-required-4)
- [Other notable changes](#other-notable-changes-14)
- [v1.12.0-alpha.1](#v1120-alpha1)
- [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1)
- [Client Binaries](#client-binaries-14)
- [Server Binaries](#server-binaries-14)
- [Node Binaries](#node-binaries-14)
- [Changelog since v1.11.0](#changelog-since-v1110) - [Changelog since v1.11.0](#changelog-since-v1110)
- [Action Required](#action-required-5) - [Action Required](#action-required-5)
- [Other notable changes](#other-notable-changes-14) - [Other notable changes](#other-notable-changes-15)
<!-- END MUNGE: GENERATED_TOC --> <!-- END MUNGE: GENERATED_TOC -->
<!-- NEW RELEASE NOTES ENTRY --> <!-- NEW RELEASE NOTES ENTRY -->
# v1.12.9
[Documentation](https://docs.k8s.io)
## Downloads for v1.12.9
filename | sha512 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes.tar.gz) | `27482e1704256927b2c494e933e5e481280350eddbf4858ab8bbf980784630c295e9b8a882e363e2e619439c3636a849b95a010eb55dc98f73b19e37d3d4ea2e`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-src.tar.gz) | `654ea2da90e61f9c5f962f2131af2d46452d2f7f629c87edcacdd3b197c0e2ea83fed341cebcfffe4c47df42ce70b6709254517215960fbde18443c43692d4fe`
### Client Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-client-darwin-386.tar.gz) | `809a9e225234cb37748c4f33d8028ca8ac52e12f4459ee710e4969230adf90340b7c245b835b8108e16333b0e159aa108dae1a4a9f80fb9d1c3c6220ddc59b97`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-client-darwin-amd64.tar.gz) | `660b1ee830c75d0a8b2be611cea0c1fdbd922895f4bfc714d66345c214b63e72528c873b337c03102d7a216df01de98cfd9c74f176172e165c120b7199e22687`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-client-linux-386.tar.gz) | `c663732322edb13f3958766c9e50197d654abe89ce8ca943f35948bd083f89b38b5a59561fac583b826e445b399d7ad2280eb2ee55824b5e5674816d80f799f5`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-client-linux-amd64.tar.gz) | `e1c4945644e2434d0938a4a565672143156ceb517618a695f84e92a8bc04f514491323c54e4e53762ad8c6199578a6600e976b2010f27e2feb006227da3d1235`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-client-linux-arm.tar.gz) | `fe9d040544b0880475834d57a5bc4eb2865d391650076ab86d0c73c8d76348c357a083f9eff60e100bf9e40a63a5dd489c17a17e85efda12f6b23991821a73bd`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-client-linux-arm64.tar.gz) | `26d8b699a663f3bd9ffc43b32861a4903f714e2a147c75d2407c0c479bea97ba5fdeeb7570f1554df5be939f5f113d3d2f0c1ca5b3f4a5a7e551cc1ecc9b22ad`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-client-linux-ppc64le.tar.gz) | `c4b0f62c3b6418c2efc85c57355461342c97c8032a61d3aa5952cb63e62dd7032546c1a936399e89531bf3458377bd20810439b49fe77a1642bbf0023d29113e`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-client-linux-s390x.tar.gz) | `c3ce8e29c6c144e203c15c04414f907a68d89089a8e7f451f80cc2507665abcbfd8ecdedccec466644036ca5612aea5b4113a62f558bcb827c64a928d736fb27`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-client-windows-386.tar.gz) | `f62e08eba18a94867a595ed5c4256b250cc8fe3a87de9dd1ca8c455704070d17f47a80a11a802fdf94ab61bc7df1107476d13f785dc21beb00db83969ac201b7`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-client-windows-amd64.tar.gz) | `0b655afcf05e54c6eb78a3a58f5713a09672a11915129d769e4a1c0d8c6b5ae6301f58efb7a65b115c04d74b578a7cc81b16ba91f37635493cf0435495d71835`
### Server Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-server-linux-amd64.tar.gz) | `fb5a0f5cd8c06fd8178affe081118db4f11e618c40be251f4348ea241fdde35bec3fdabeb1ac0e99056f64cd13d99715edfbd44ff197072185793cbe23badbdc`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-server-linux-arm.tar.gz) | `5c998f415ea8e1b96385d557aca46691f041e98bafa5ad8a4e110f60155b05106dcf313256c7622819145be44edf897a789fa7304ccb4e1df634071d37de3ad1`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-server-linux-arm64.tar.gz) | `9501823b79673b129a7abb5f07700259faee10849da710bf12979468b44e6fda2e93e7f2e77db913edcdad4477580d398cbe3b24eca1cc0f0731c407357b8e4c`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-server-linux-ppc64le.tar.gz) | `c4d814fc498923f257c0a96e48d0adbea2487308f269c0e70118de89df4b557b523d507d48bb2432f74235b4461f3a50fa26d5bbee1e020436d5c6591ae88958`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-server-linux-s390x.tar.gz) | `dfb06bb352db236ea6763d0237bb1cf6a26c2d04c54b85de19dd6bc096ce93259038123049a32d0efc982e559628ed27cb41c56d2bf7f46b13a8687e4d478fb0`
### Node Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-node-linux-amd64.tar.gz) | `32017a8f8d47bf4f8ac9507f278bbc814d70a804d1d9b27ffd4ae4c2b9646b34513b7ea972839dabc8d83e0be67f32b0baa4b77a4f2eaefa56ba5f6d917fe1a0`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-node-linux-arm.tar.gz) | `cb2b3563806dc488a2e8b9d54d545b3f4cbad746d38e3d847d5ab5a1b59af10183dc72835fc22557416d2c2722afa4b37083c1000edc1cd522ce144580c2746e`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-node-linux-arm64.tar.gz) | `e1bd333c3bd8bad52af1c696eb28ffcc058ba5fd6c554244627de8d9231b69466e33a40132b3e03158dd57b61c2080eecd559ec295c2db91b6340dee625b0277`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-node-linux-ppc64le.tar.gz) | `ecf696b8522e1dffa61e00aa3e27aad27135487640f8c58af84ca883827ad568ec96e9eb4ccd2220e43bb3c1afae9da639047d21e545c8fa9a17ffa66b2a130a`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-node-linux-s390x.tar.gz) | `e6177729ce9aadc31dd8237f7282cbbe30f6432ab14931310eb6fe892edb126e4ac6e5212b2aa8d2cfd1f00af228dcb1303faf4cc25a0a9abf3cee7892b9cda7`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.9/kubernetes-node-windows-amd64.tar.gz) | `c2daa564e89d6ec8c5322e3b2b7188ac1cb1091ab7f56121f3017ae8ea334e4b626978b31dbed31f7e9b2b1e8e4ada7bb8376831fb61e364568296041bf39fe0`
## Changelog since v1.12.8
### Other notable changes
* Active watches of custom resources now terminate properly if the CRD is modified. ([#78029](https://github.com/kubernetes/kubernetes/pull/78029), [@liggitt](https://github.com/liggitt))
* Fix incorrect Prometheus Azure metrics ([#77722](https://github.com/kubernetes/kubernetes/pull/77722), [@andyzhangx](https://github.com/andyzhangx))
* Fixed a bug in the apiserver storage that could cause just-added finalizers to be ignored on an immediately following delete request, leading to premature deletion. ([#77619](https://github.com/kubernetes/kubernetes/pull/77619), [@caesarxuchao](https://github.com/caesarxuchao)) (see the sketch after this list)
* client-go and kubectl no longer write cached discovery files with world-accessible file permissions ([#77874](https://github.com/kubernetes/kubernetes/pull/77874), [@yuchengwu](https://github.com/yuchengwu))
* Check if container memory stats are available before accessing them ([#77656](https://github.com/kubernetes/kubernetes/pull/77656), [@yastij](https://github.com/yastij))
* Fixes segmentation fault issue with Protobuf library when log entries are deeply nested. ([#77224](https://github.com/kubernetes/kubernetes/pull/77224), [@qingling128](https://github.com/qingling128))
* Clean up link handling in `kubectl cp`'s tar code ([#76788](https://github.com/kubernetes/kubernetes/pull/76788), [@soltysh](https://github.com/soltysh))
* [fluentd-gcp addon] Bump fluentd-gcp-scaler to v0.5.2 to pick up security fixes. ([#76762](https://github.com/kubernetes/kubernetes/pull/76762), [@serathius](https://github.com/serathius))
* Fixes an error with stuck informers when an etcd watch receives update or delete events with missing data ([#76675](https://github.com/kubernetes/kubernetes/pull/76675), [@ryanmcnamara](https://github.com/ryanmcnamara))
* Fix an Azure disk list corruption issue ([#77187](https://github.com/kubernetes/kubernetes/pull/77187), [@andyzhangx](https://github.com/andyzhangx))
* Fixed scanning of failed iSCSI targets. ([#74306](https://github.com/kubernetes/kubernetes/pull/74306), [@jsafrane](https://github.com/jsafrane))
* Fix the Azure disk detach back-off issue caused by an overly broad lock in the failure retry path ([#76573](https://github.com/kubernetes/kubernetes/pull/76573), [@andyzhangx](https://github.com/andyzhangx))
* Specify the Azure file share name in the Azure file plugin ([#76988](https://github.com/kubernetes/kubernetes/pull/76988), [@andyzhangx](https://github.com/andyzhangx))
* [metrics-server addon] Restore connecting to nodes via IP addresses ([#76819](https://github.com/kubernetes/kubernetes/pull/76819), [@serathius](https://github.com/serathius))
* Update Cluster Autoscaler to 1.12.5 ([#77063](https://github.com/kubernetes/kubernetes/pull/77063), [@losipiuk](https://github.com/losipiuk))
    - https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.12.5
    - https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.12.4
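
The finalizer entry above is easier to picture with a small client-go sketch. This is a hypothetical, standalone example (the object name and finalizer string are made up, the context-free method signatures match the client-go of this era, and it assumes a reachable cluster via `~/.kube/config`): deleting an object that carries a finalizer only sets its deletionTimestamp, and the 1.12.9 fix ensures a finalizer added just before the delete is honored the same way.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the local kubeconfig (adjust as needed).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Create a ConfigMap that carries a finalizer from the start.
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "demo",                        // hypothetical name
			Finalizers: []string{"example.com/cleanup"}, // hypothetical finalizer
		},
	}
	if _, err := cs.CoreV1().ConfigMaps("default").Create(cm); err != nil {
		panic(err)
	}

	// Deleting it only sets deletionTimestamp; the object stays around until
	// the finalizer is removed.
	if err := cs.CoreV1().ConfigMaps("default").Delete("demo", &metav1.DeleteOptions{}); err != nil {
		panic(err)
	}
	got, err := cs.CoreV1().ConfigMaps("default").Get("demo", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("deletionTimestamp set:", got.DeletionTimestamp != nil)
}
```
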
# v1.12.8 # v1.12.8
[Documentation](https://docs.k8s.io) [Documentation](https://docs.k8s.io)


@ -967,12 +967,7 @@ filename | sha512 hash
* While this is a backwards-incompatible change, it would have been impossible to setup reliable monitoring around these metrics since the labels were not stable. * While this is a backwards-incompatible change, it would have been impossible to setup reliable monitoring around these metrics since the labels were not stable.
* Add a configuration field to shorten the timeout of validating/mutating admission webhook call. The timeout value must be between 1 and 30 seconds. Default to 30 seconds when unspecified. ([#74562](https://github.com/kubernetes/kubernetes/pull/74562), [@roycaihw](https://github.com/roycaihw)) * Add a configuration field to shorten the timeout of validating/mutating admission webhook call. The timeout value must be between 1 and 30 seconds. Default to 30 seconds when unspecified. ([#74562](https://github.com/kubernetes/kubernetes/pull/74562), [@roycaihw](https://github.com/roycaihw))
* client-go: PortForwarder.GetPorts() now contain correct local port if no local port was initially specified when setting up the port forwarder ([#73676](https://github.com/kubernetes/kubernetes/pull/73676), [@martin-helmich](https://github.com/martin-helmich)) * client-go: PortForwarder.GetPorts() now contain correct local port if no local port was initially specified when setting up the port forwarder ([#73676](https://github.com/kubernetes/kubernetes/pull/73676), [@martin-helmich](https://github.com/martin-helmich))
* # Apply resources from a directory containing kustomization.yaml ([#74140](https://github.com/kubernetes/kubernetes/pull/74140), [@Liujingfang1](https://github.com/Liujingfang1)) * The examples in kubectl apply/get/delete are updated to support `-k` which uses a `kustomization.yaml` file. ([#74140](https://github.com/kubernetes/kubernetes/pull/74140), [@Liujingfang1](https://github.com/Liujingfang1))
* kubectl apply -k dir
* # Delete resources from a directory containing kustomization.yaml.
* kubectl delete -k dir
* # List resources from a directory containing kustomization.yaml
* kubectl get -k dir
* kubeadm: Allow to download certificate secrets uploaded by `init` or `upload-certs` phase, allowing to transfer certificate secrets (certificates and keys) from the cluster to other master machines when creating HA deployments. ([#74168](https://github.com/kubernetes/kubernetes/pull/74168), [@ereslibre](https://github.com/ereslibre)) * kubeadm: Allow to download certificate secrets uploaded by `init` or `upload-certs` phase, allowing to transfer certificate secrets (certificates and keys) from the cluster to other master machines when creating HA deployments. ([#74168](https://github.com/kubernetes/kubernetes/pull/74168), [@ereslibre](https://github.com/ereslibre))
* Fixes an issue with missing apiVersion/kind in object data sent to admission webhooks ([#74448](https://github.com/kubernetes/kubernetes/pull/74448), [@liggitt](https://github.com/liggitt)) * Fixes an issue with missing apiVersion/kind in object data sent to admission webhooks ([#74448](https://github.com/kubernetes/kubernetes/pull/74448), [@liggitt](https://github.com/liggitt))
* client-go: the deprecated versionless API group accessors (like `clientset.Apps()` have been removed). Use an explicit version instead (like `clientset.AppsV1()`) ([#74422](https://github.com/kubernetes/kubernetes/pull/74422), [@liggitt](https://github.com/liggitt)) * client-go: the deprecated versionless API group accessors (like `clientset.Apps()` have been removed). Use an explicit version instead (like `clientset.AppsV1()`) ([#74422](https://github.com/kubernetes/kubernetes/pull/74422), [@liggitt](https://github.com/liggitt))


@ -11222,6 +11222,16 @@
}, },
"io.k8s.api.core.v1.WindowsSecurityContextOptions": { "io.k8s.api.core.v1.WindowsSecurityContextOptions": {
"description": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials.",
"properties": {
"gmsaCredentialSpec": {
"description": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
"type": "string"
},
"gmsaCredentialSpecName": {
"description": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
"type": "string"
}
},
"type": "object" "type": "object"
}, },
"io.k8s.api.events.v1beta1.Event": { "io.k8s.api.events.v1beta1.Event": {
@ -16606,7 +16616,7 @@
"description": "CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.", "description": "CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.",
"properties": { "properties": {
"labelSelectorPath": { "labelSelectorPath": {
"description": "LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. Must be set to work with HPA. If there is no value under the given path in the CustomResource, the status label selector value in the /scale subresource will default to the empty string.", "description": "LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status or .spec. Must be set to work with HPA. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the CustomResource, the status label selector value in the /scale subresource will default to the empty string.",
"type": "string" "type": "string"
}, },
"specReplicasPath": { "specReplicasPath": {


@ -78,7 +78,7 @@ spec:
- /eventer - /eventer
- --source=kubernetes:'' - --source=kubernetes:''
- --sink=gcl - --sink=gcl
- image: k8s.gcr.io/addon-resizer:1.8.4 - image: k8s.gcr.io/addon-resizer:1.8.5
name: heapster-nanny name: heapster-nanny
resources: resources:
limits: limits:
@ -114,7 +114,7 @@ spec:
# Specifies the smallest cluster (defined in number of nodes) # Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to. # resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }} - --minClusterSize={{ heapster_min_cluster_size }}
- image: k8s.gcr.io/addon-resizer:1.8.4 - image: k8s.gcr.io/addon-resizer:1.8.5
name: eventer-nanny name: eventer-nanny
resources: resources:
limits: limits:


@ -79,7 +79,7 @@ spec:
- /eventer - /eventer
- --source=kubernetes:'' - --source=kubernetes:''
- --sink=gcl - --sink=gcl
- image: k8s.gcr.io/addon-resizer:1.8.4 - image: k8s.gcr.io/addon-resizer:1.8.5
name: heapster-nanny name: heapster-nanny
resources: resources:
limits: limits:
@ -115,7 +115,7 @@ spec:
# Specifies the smallest cluster (defined in number of nodes) # Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to. # resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }} - --minClusterSize={{ heapster_min_cluster_size }}
- image: k8s.gcr.io/addon-resizer:1.8.4 - image: k8s.gcr.io/addon-resizer:1.8.5
name: eventer-nanny name: eventer-nanny
resources: resources:
limits: limits:


@ -78,7 +78,7 @@ spec:
- /eventer - /eventer
- --source=kubernetes:'' - --source=kubernetes:''
- --sink=influxdb:http://monitoring-influxdb:8086 - --sink=influxdb:http://monitoring-influxdb:8086
- image: k8s.gcr.io/addon-resizer:1.8.4 - image: k8s.gcr.io/addon-resizer:1.8.5
name: heapster-nanny name: heapster-nanny
resources: resources:
limits: limits:
@ -114,7 +114,7 @@ spec:
# Specifies the smallest cluster (defined in number of nodes) # Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to. # resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }} - --minClusterSize={{ heapster_min_cluster_size }}
- image: k8s.gcr.io/addon-resizer:1.8.4 - image: k8s.gcr.io/addon-resizer:1.8.5
name: eventer-nanny name: eventer-nanny
resources: resources:
limits: limits:


@ -81,7 +81,7 @@ spec:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
# END_PROMETHEUS_TO_SD # END_PROMETHEUS_TO_SD
- image: k8s.gcr.io/addon-resizer:1.8.4 - image: k8s.gcr.io/addon-resizer:1.8.5
name: heapster-nanny name: heapster-nanny
resources: resources:
limits: limits:


@ -59,7 +59,7 @@ spec:
command: command:
- /heapster - /heapster
- --source=kubernetes.summary_api:'' - --source=kubernetes.summary_api:''
- image: k8s.gcr.io/addon-resizer:1.8.4 - image: k8s.gcr.io/addon-resizer:1.8.5
name: heapster-nanny name: heapster-nanny
resources: resources:
limits: limits:


@ -63,7 +63,7 @@ spec:
name: https name: https
protocol: TCP protocol: TCP
- name: metrics-server-nanny - name: metrics-server-nanny
image: k8s.gcr.io/addon-resizer:1.8.4 image: k8s.gcr.io/addon-resizer:1.8.5
resources: resources:
limits: limits:
cpu: 100m cpu: 100m


@ -39,7 +39,7 @@ spec:
initialDelaySeconds: 5 initialDelaySeconds: 5
timeoutSeconds: 5 timeoutSeconds: 5
- name: addon-resizer - name: addon-resizer
image: k8s.gcr.io/addon-resizer:1.8.4 image: k8s.gcr.io/addon-resizer:1.8.5
resources: resources:
limits: limits:
cpu: 100m cpu: 100m


@ -523,3 +523,5 @@ WINDOWS_NODE_TAINTS="${WINDOWS_NODE_TAINTS:-node.kubernetes.io/os=win1809:NoSche
# Whether to set up a private GCE cluster, i.e. a cluster where nodes have only private IPs. # Whether to set up a private GCE cluster, i.e. a cluster where nodes have only private IPs.
GCE_PRIVATE_CLUSTER="${KUBE_GCE_PRIVATE_CLUSTER:-false}" GCE_PRIVATE_CLUSTER="${KUBE_GCE_PRIVATE_CLUSTER:-false}"
ETCD_LISTEN_CLIENT_IP=0.0.0.0


@ -611,6 +611,15 @@ function create-master-auth {
if [[ -n "${ADDON_MANAGER_TOKEN:-}" ]]; then if [[ -n "${ADDON_MANAGER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${ADDON_MANAGER_TOKEN}," "system:addon-manager,uid:system:addon-manager,system:masters" append_or_replace_prefixed_line "${known_tokens_csv}" "${ADDON_MANAGER_TOKEN}," "system:addon-manager,uid:system:addon-manager,system:masters"
fi fi
if [[ -n "${EXTRA_STATIC_AUTH_COMPONENTS:-}" ]]; then
# Create a static Bearer token and kubeconfig for extra, comma-separated components.
IFS="," read -r -a extra_components <<< "${EXTRA_STATIC_AUTH_COMPONENTS:-}"
for extra_component in "${extra_components[@]}"; do
local token="$(secure_random 32)"
append_or_replace_prefixed_line "${known_tokens_csv}" "${token}," "system:${extra_component},uid:system:${extra_component}"
create-kubeconfig "${extra_component}" "${token}"
done
fi
local use_cloud_config="false" local use_cloud_config="false"
cat <<EOF >/etc/gce.conf cat <<EOF >/etc/gce.conf
[global] [global]
@ -1405,6 +1414,7 @@ function prepare-etcd-manifest {
sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}" sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}"
sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}" sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}" sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
sed -i -e "s@{{ *listen_client_ip *}}@${ETCD_LISTEN_CLIENT_IP:-127.0.0.1}@g" "${temp_file}"
# Get default storage backend from manifest file. # Get default storage backend from manifest file.
local -r default_storage_backend=$(cat "${temp_file}" | \ local -r default_storage_backend=$(cat "${temp_file}" | \
grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \ grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \


@ -23,7 +23,7 @@
"command": [ "command": [
"/bin/sh", "/bin/sh",
"-c", "-c",
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; exec /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ host_ip }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} {{ etcd_apiserver_creds }} {{ etcd_extra_args }} 1>>/var/log/etcd{{ suffix }}.log 2>&1" "if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; exec /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ host_ip }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://{{ listen_client_ip }}:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} {{ etcd_apiserver_creds }} {{ etcd_extra_args }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
], ],
"env": [ "env": [
{ "name": "TARGET_STORAGE", { "name": "TARGET_STORAGE",


@ -730,7 +730,6 @@ function construct-common-kubelet-flags {
function construct-linux-kubelet-flags { function construct-linux-kubelet-flags {
local master="$1" local master="$1"
local flags="$(construct-common-kubelet-flags)" local flags="$(construct-common-kubelet-flags)"
flags+=" --allow-privileged=true"
# Keep in sync with CONTAINERIZED_MOUNTER_HOME in configure-helper.sh # Keep in sync with CONTAINERIZED_MOUNTER_HOME in configure-helper.sh
flags+=" --experimental-mounter-path=/home/kubernetes/containerized_mounter/mounter" flags+=" --experimental-mounter-path=/home/kubernetes/containerized_mounter/mounter"
flags+=" --experimental-check-node-capabilities-before-mount=true" flags+=" --experimental-check-node-capabilities-before-mount=true"
@ -1406,6 +1405,11 @@ EOF
if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF cat >>$file <<EOF
API_SERVER_TEST_LOG_LEVEL: $(yaml-quote ${API_SERVER_TEST_LOG_LEVEL}) API_SERVER_TEST_LOG_LEVEL: $(yaml-quote ${API_SERVER_TEST_LOG_LEVEL})
EOF
fi
if [ -n "${ETCD_LISTEN_CLIENT_IP:-}" ]; then
cat >>$file <<EOF
ETCD_LISTEN_CLIENT_IP: $(yaml-quote ${ETCD_LISTEN_CLIENT_IP})
EOF EOF
fi fi


@ -249,18 +249,16 @@ function Set-PrerequisiteOptions {
Install-Module -Name powershell-yaml -Force Install-Module -Name powershell-yaml -Force
} }
# Disables Windows Defender realtime scanning if this Windows node is part of a # Disables Windows Defender realtime scanning.
# test cluster. # TODO: remove this workaround once the fix is rolled out to the Windows image
# # https://github.com/kubernetes/kubernetes/issues/75148
# ${kube_env} must have already been set.
function Disable-WindowsDefender { function Disable-WindowsDefender {
# Windows Defender periodically consumes 100% of the CPU, so disable realtime # Windows Defender periodically consumes 100% of the CPU, so disable realtime
# scanning. Uninstalling the Windows Feature will prevent the service from # scanning. Uninstalling the Windows Feature will prevent the service from
# starting after a reboot. # starting after a reboot.
# TODO(pjh): move this step to image preparation, since we don't want to do a # TODO(pjh): move this step to image preparation, since we don't want to do a
# full reboot here. # full reboot here.
if ((Test-IsTestCluster ${kube_env}) -and if ((Get-WindowsFeature -Name 'Windows-Defender').Installed) {
((Get-WindowsFeature -Name 'Windows-Defender').Installed)) {
Log-Output "Disabling Windows Defender service" Log-Output "Disabling Windows Defender service"
Set-MpPreference -DisableRealtimeMonitoring $true Set-MpPreference -DisableRealtimeMonitoring $true
Uninstall-WindowsFeature -Name 'Windows-Defender' Uninstall-WindowsFeature -Name 'Windows-Defender'


@ -107,7 +107,6 @@ function create_cluster {
--address=0.0.0.0 \ --address=0.0.0.0 \
--kubeconfig=${KUBELET_KUBECONFIG} \ --kubeconfig=${KUBELET_KUBECONFIG} \
--pod-manifest-path=/etc/kubernetes/manifests \ --pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged=true \
--cluster-dns=10.0.0.10 \ --cluster-dns=10.0.0.10 \
--cluster-domain=cluster.local \ --cluster-domain=cluster.local \
--v=2" --v=2"


@ -84,8 +84,10 @@ import (
"k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap" "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
) )
const etcdRetryLimit = 60 const (
const etcdRetryInterval = 1 * time.Second etcdRetryLimit = 60
etcdRetryInterval = 1 * time.Second
)
// NewAPIServerCommand creates a *cobra.Command object with default parameters // NewAPIServerCommand creates a *cobra.Command object with default parameters
func NewAPIServerCommand() *cobra.Command { func NewAPIServerCommand() *cobra.Command {


@ -22,6 +22,8 @@ package app
import ( import (
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/scale"
"k8s.io/kubernetes/pkg/controller/disruption" "k8s.io/kubernetes/pkg/controller/disruption"
"net/http" "net/http"
@ -40,6 +42,15 @@ func startDisruptionController(ctx ControllerContext) (http.Handler, bool, error
resource, group+"/"+version) resource, group+"/"+version)
return nil, false, nil return nil, false, nil
} }
client := ctx.ClientBuilder.ClientOrDie("disruption-controller")
config := ctx.ClientBuilder.ConfigOrDie("disruption-controller")
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(client.Discovery())
scaleClient, err := scale.NewForConfig(config, ctx.RESTMapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
if err != nil {
return nil, false, err
}
go disruption.NewDisruptionController( go disruption.NewDisruptionController(
ctx.InformerFactory.Core().V1().Pods(), ctx.InformerFactory.Core().V1().Pods(),
ctx.InformerFactory.Policy().V1beta1().PodDisruptionBudgets(), ctx.InformerFactory.Policy().V1beta1().PodDisruptionBudgets(),
@ -47,7 +58,9 @@ func startDisruptionController(ctx ControllerContext) (http.Handler, bool, error
ctx.InformerFactory.Apps().V1().ReplicaSets(), ctx.InformerFactory.Apps().V1().ReplicaSets(),
ctx.InformerFactory.Apps().V1().Deployments(), ctx.InformerFactory.Apps().V1().Deployments(),
ctx.InformerFactory.Apps().V1().StatefulSets(), ctx.InformerFactory.Apps().V1().StatefulSets(),
ctx.ClientBuilder.ClientOrDie("disruption-controller"), client,
ctx.RESTMapper,
scaleClient,
).Run(ctx.Stop) ).Run(ctx.Stop)
return nil, true, nil return nil, true, nil
} }
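
The change above hands the disruption controller a scale client so PodDisruptionBudgets can resolve arbitrary scalable resources through the /scale subresource. A standalone sketch of the same construction outside the controller-manager (the target object is hypothetical; the no-context `Get` signature matches the client-go vintage of this commit, newer releases also take a context and options):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/scale"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	disco, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Same building blocks as above: a RESTMapper plus a discovery-based
	// resolver for the scale subresource's GroupVersionKind.
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(disco))
	resolver := scale.NewDiscoveryScaleKindResolver(disco)

	scales, err := scale.NewForConfig(cfg, mapper, dynamic.LegacyAPIPathResolverFunc, resolver)
	if err != nil {
		panic(err)
	}

	// Hypothetical target: a Deployment named "web" in "default".
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
	s, err := scales.Scales("default").Get(gr, "web")
	if err != nil {
		panic(err)
	}
	fmt.Println("replicas:", s.Spec.Replicas)
}
```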


@ -32,6 +32,7 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
fuzzClusterConfiguration, fuzzClusterConfiguration,
fuzzComponentConfigs, fuzzComponentConfigs,
fuzzDNS, fuzzDNS,
fuzzNodeRegistration,
fuzzLocalEtcd, fuzzLocalEtcd,
fuzzNetworking, fuzzNetworking,
fuzzJoinConfiguration, fuzzJoinConfiguration,
@ -87,6 +88,13 @@ func fuzzInitConfiguration(obj *kubeadm.InitConfiguration, c fuzz.Continue) {
obj.CertificateKey = "" obj.CertificateKey = ""
} }
func fuzzNodeRegistration(obj *kubeadm.NodeRegistrationOptions, c fuzz.Continue) {
c.FuzzNoCustom(obj)
// Pinning values for fields that get defaults if fuzz value is empty string or nil (thus making the round trip test fail)
obj.IgnorePreflightErrors = nil
}
func fuzzClusterConfiguration(obj *kubeadm.ClusterConfiguration, c fuzz.Continue) { func fuzzClusterConfiguration(obj *kubeadm.ClusterConfiguration, c fuzz.Continue) {
c.FuzzNoCustom(obj) c.FuzzNoCustom(obj)
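
The pinned field above exists because round-trip fuzz tests compare an object after converting out and back, and any field that picks up a default during that trip breaks the comparison. A toy illustration of that failure mode (the types and the defaulting step are invented for the example):

```go
package main

import (
	"fmt"
	"reflect"
)

// A toy "versioned" round trip: convert out and back, then compare.
type internalObj struct{ IgnorePreflightErrors []string }
type versionedObj struct{ IgnorePreflightErrors []string }

func toVersioned(in internalObj) versionedObj { return versionedObj(in) }

func toInternal(in versionedObj) internalObj {
	out := internalObj(in)
	// Defaulting step: a nil field gets filled in, much like API defaulting.
	if out.IgnorePreflightErrors == nil {
		out.IgnorePreflightErrors = []string{}
	}
	return out
}

func main() {
	// A fuzzer that leaves the field nil makes the round trip "fail" only
	// because of defaulting, which is why the fuzz function above pins it.
	orig := internalObj{IgnorePreflightErrors: nil}
	back := toInternal(toVersioned(orig))
	fmt.Println(reflect.DeepEqual(orig, back)) // false: nil vs empty slice
}
```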


@ -229,6 +229,9 @@ type NodeRegistrationOptions struct {
// kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap // kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap
// Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. // Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on.
KubeletExtraArgs map[string]string KubeletExtraArgs map[string]string
// IgnorePreflightErrors provides a slice of pre-flight errors to be ignored when the current node is registered.
IgnorePreflightErrors []string
} }
// Networking contains elements describing cluster's networking configuration. // Networking contains elements describing cluster's networking configuration.


@ -45,3 +45,15 @@ func Convert_kubeadm_JoinControlPlane_To_v1beta1_JoinControlPlane(in *kubeadm.Jo
return nil return nil
} }
func Convert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error {
if err := autoConvert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(in, out, s); err != nil {
return err
}
if len(in.IgnorePreflightErrors) > 0 {
return errors.New("ignorePreflightErrors field is not supported by v1beta1 config format")
}
return nil
}


@ -37,6 +37,14 @@ func TestInternalToVersionedInitConfigurationConversion(t *testing.T) {
}, },
expectedError: true, expectedError: true,
}, },
"ignorePreflightErrors set causes an error": {
in: kubeadm.InitConfiguration{
NodeRegistration: kubeadm.NodeRegistrationOptions{
IgnorePreflightErrors: []string{"SomeUndesirableError"},
},
},
expectedError: true,
},
} }
for name, tc := range testcases { for name, tc := range testcases {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
@ -51,6 +59,66 @@ func TestInternalToVersionedInitConfigurationConversion(t *testing.T) {
} }
} }
func TestInternalToVersionedJoinConfigurationConversion(t *testing.T) {
testcases := map[string]struct {
in kubeadm.JoinConfiguration
expectedError bool
}{
"conversion succeeds": {
in: kubeadm.JoinConfiguration{},
expectedError: false,
},
"ignorePreflightErrors set causes an error": {
in: kubeadm.JoinConfiguration{
NodeRegistration: kubeadm.NodeRegistrationOptions{
IgnorePreflightErrors: []string{"SomeUndesirableError"},
},
},
expectedError: true,
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
versioned := &JoinConfiguration{}
err := Convert_kubeadm_JoinConfiguration_To_v1beta1_JoinConfiguration(&tc.in, versioned, nil)
if err == nil && tc.expectedError {
t.Error("unexpected success")
} else if err != nil && !tc.expectedError {
t.Errorf("unexpected error: %v", err)
}
})
}
}
func TestInternalToVersionedNodeRegistrationOptionsConversion(t *testing.T) {
testcases := map[string]struct {
in kubeadm.NodeRegistrationOptions
expectedError bool
}{
"conversion succeeds": {
in: kubeadm.NodeRegistrationOptions{},
expectedError: false,
},
"ignorePreflightErrors set causes an error": {
in: kubeadm.NodeRegistrationOptions{
IgnorePreflightErrors: []string{"SomeUndesirableError"},
},
expectedError: true,
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
versioned := &NodeRegistrationOptions{}
err := Convert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(&tc.in, versioned, nil)
if err == nil && tc.expectedError {
t.Error("unexpected success")
} else if err != nil && !tc.expectedError {
t.Errorf("unexpected error: %v", err)
}
})
}
}
func TestInternalToVersionedJoinControlPlaneConversion(t *testing.T) { func TestInternalToVersionedJoinControlPlaneConversion(t *testing.T) {
testcases := map[string]struct { testcases := map[string]struct {
in kubeadm.JoinControlPlane in kubeadm.JoinControlPlane


@ -22,5 +22,5 @@ const (
// DefaultCACertPath defines default location of CA certificate on Windows // DefaultCACertPath defines default location of CA certificate on Windows
DefaultCACertPath = "C:/etc/kubernetes/pki/ca.crt" DefaultCACertPath = "C:/etc/kubernetes/pki/ca.crt"
// DefaultUrlScheme defines default socket url prefix // DefaultUrlScheme defines default socket url prefix
DefaultUrlScheme = "tcp" DefaultUrlScheme = "npipe"
) )


@ -257,6 +257,11 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddConversionFunc((*kubeadm.NodeRegistrationOptions)(nil), (*NodeRegistrationOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(a.(*kubeadm.NodeRegistrationOptions), b.(*NodeRegistrationOptions), scope)
}); err != nil {
return err
}
return nil return nil
} }
@ -848,10 +853,6 @@ func autoConvert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOpti
out.CRISocket = in.CRISocket out.CRISocket = in.CRISocket
out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints)) out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints))
out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs)) out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
// WARNING: in.IgnorePreflightErrors requires manual conversion: does not exist in peer-type
return nil return nil
} }
// Convert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions is an autogenerated conversion function.
func Convert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error {
return autoConvert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(in, out, s)
}


@ -22,5 +22,5 @@ const (
// DefaultCACertPath defines default location of CA certificate on Windows // DefaultCACertPath defines default location of CA certificate on Windows
DefaultCACertPath = "C:/etc/kubernetes/pki/ca.crt" DefaultCACertPath = "C:/etc/kubernetes/pki/ca.crt"
// DefaultUrlScheme defines default socket url prefix // DefaultUrlScheme defines default socket url prefix
DefaultUrlScheme = "tcp" DefaultUrlScheme = "npipe"
) )


@ -215,6 +215,9 @@ type NodeRegistrationOptions struct {
// kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap // kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap
// Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. // Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on.
KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"` KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"`
// IgnorePreflightErrors provides a slice of pre-flight errors to be ignored when the current node is registered.
IgnorePreflightErrors []string `json:"ignorePreflightErrors,omitempty"`
} }
// Networking contains elements describing cluster's networking configuration // Networking contains elements describing cluster's networking configuration
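
Because `ignorePreflightErrors` is now part of the public v1beta2 config surface, it can be carried in an `InitConfiguration` instead of on the command line. A minimal sketch using the in-tree v1beta2 types (node name and check names are examples; the package only builds inside the kubernetes tree):

```go
package main

import (
	"fmt"

	kubeadmv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
)

func main() {
	cfg := &kubeadmv1beta2.InitConfiguration{
		NodeRegistration: kubeadmv1beta2.NodeRegistrationOptions{
			Name:      "control-plane-1", // hypothetical node name
			CRISocket: "/run/containerd/containerd.sock",
			// Equivalent of `--ignore-preflight-errors=IsPrivilegedUser,Swap`,
			// but kept in the config file instead of on the CLI.
			IgnorePreflightErrors: []string{"IsPrivilegedUser", "Swap"},
		},
	}
	fmt.Println(cfg.NodeRegistration.IgnorePreflightErrors)
}
```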


@ -821,6 +821,7 @@ func autoConvert_v1beta2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOpti
out.CRISocket = in.CRISocket out.CRISocket = in.CRISocket
out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints)) out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints))
out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs)) out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
out.IgnorePreflightErrors = *(*[]string)(unsafe.Pointer(&in.IgnorePreflightErrors))
return nil return nil
} }
@ -834,6 +835,7 @@ func autoConvert_kubeadm_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOpti
out.CRISocket = in.CRISocket out.CRISocket = in.CRISocket
out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints)) out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints))
out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs)) out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
out.IgnorePreflightErrors = *(*[]string)(unsafe.Pointer(&in.IgnorePreflightErrors))
return nil return nil
} }


@ -538,6 +538,11 @@ func (in *NodeRegistrationOptions) DeepCopyInto(out *NodeRegistrationOptions) {
(*out)[key] = val (*out)[key] = val
} }
} }
if in.IgnorePreflightErrors != nil {
in, out := &in.IgnorePreflightErrors, &out.IgnorePreflightErrors
*out = make([]string, len(*in))
copy(*out, *in)
}
return return
} }
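
The generated deep-copy above allocates a fresh slice instead of sharing the backing array. A tiny standalone illustration of why that matters for a field like `IgnorePreflightErrors`:

```go
package main

import "fmt"

type options struct {
	IgnorePreflightErrors []string
}

// deepCopy mirrors the generated DeepCopyInto logic above for one field.
func (o *options) deepCopy() *options {
	out := &options{}
	if o.IgnorePreflightErrors != nil {
		out.IgnorePreflightErrors = make([]string, len(o.IgnorePreflightErrors))
		copy(out.IgnorePreflightErrors, o.IgnorePreflightErrors)
	}
	return out
}

func main() {
	a := &options{IgnorePreflightErrors: []string{"swap"}}

	shallow := &options{IgnorePreflightErrors: a.IgnorePreflightErrors} // shares the array
	deep := a.deepCopy()

	a.IgnorePreflightErrors[0] = "all"
	fmt.Println(shallow.IgnorePreflightErrors[0]) // "all"  (aliased)
	fmt.Println(deep.IgnorePreflightErrors[0])    // "swap" (independent)
}
```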


@ -34,6 +34,7 @@ go_test(
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//pkg/proxy/apis/config:go_default_library", "//pkg/proxy/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library",


@ -463,12 +463,25 @@ func ValidateAPIEndpoint(c *kubeadm.APIEndpoint, fldPath *field.Path) field.Erro
return allErrs return allErrs
} }
// ValidateIgnorePreflightErrors validates duplicates in ignore-preflight-errors flag. // ValidateIgnorePreflightErrors validates duplicates in:
func ValidateIgnorePreflightErrors(ignorePreflightErrors []string) (sets.String, error) { // - ignore-preflight-errors flag and
// - ignorePreflightErrors field in {Init,Join}Configuration files.
func ValidateIgnorePreflightErrors(ignorePreflightErrorsFromCLI, ignorePreflightErrorsFromConfigFile []string) (sets.String, error) {
ignoreErrors := sets.NewString() ignoreErrors := sets.NewString()
allErrs := field.ErrorList{} allErrs := field.ErrorList{}
for _, item := range ignorePreflightErrors { for _, item := range ignorePreflightErrorsFromConfigFile {
ignoreErrors.Insert(strings.ToLower(item)) // parameters are case insensitive
}
if ignoreErrors.Has("all") {
// "all" is forbidden in config files. Administrators should use an
// explicit list of errors they want to ignore, as it can be risky to
// mask all errors in such a way. Hence, we return an error:
allErrs = append(allErrs, field.Invalid(field.NewPath("ignorePreflightErrors"), "all", "'all' cannot be used in configuration file"))
}
for _, item := range ignorePreflightErrorsFromCLI {
ignoreErrors.Insert(strings.ToLower(item)) // parameters are case insensitive ignoreErrors.Insert(strings.ToLower(item)) // parameters are case insensitive
} }
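
A condensed, standalone re-implementation of the merge rules described above (for illustration only, not the kubeadm function itself): entries are lower-cased and unioned, `all` is rejected when it comes from the config file, and `all` mixed with individual checks is rejected as well, matching the test cases later in this diff.

```go
package main

import (
	"errors"
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

// mergeIgnorePreflightErrors mirrors the rules above: lower-case everything,
// union both sources, and reject "all" when it comes from the config file.
func mergeIgnorePreflightErrors(fromCLI, fromConfig []string) (sets.String, error) {
	ignore := sets.NewString()
	for _, item := range fromConfig {
		ignore.Insert(strings.ToLower(item)) // parameters are case insensitive
	}
	if ignore.Has("all") {
		return nil, errors.New("'all' cannot be used in the configuration file")
	}
	for _, item := range fromCLI {
		ignore.Insert(strings.ToLower(item))
	}
	if ignore.Has("all") && ignore.Len() > 1 {
		return nil, errors.New("don't specify individual checks if 'all' is used")
	}
	return ignore, nil
}

func main() {
	got, err := mergeIgnorePreflightErrors([]string{"Swap"}, []string{"IsPrivilegedUser"})
	fmt.Println(got.List(), err) // [isprivilegeduser swap] <nil>
}
```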


@ -24,6 +24,7 @@ import (
"github.com/spf13/pflag" "github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2" kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
@ -703,26 +704,81 @@ func TestValidateFeatureGates(t *testing.T) {
func TestValidateIgnorePreflightErrors(t *testing.T) { func TestValidateIgnorePreflightErrors(t *testing.T) {
var tests = []struct { var tests = []struct {
ignorePreflightErrors []string ignorePreflightErrorsFromCLI []string
expectedLen int ignorePreflightErrorsFromConfigFile []string
expectedError bool expectedSet sets.String
expectedError bool
}{ }{
{[]string{}, 0, false}, // empty list { // empty lists in CLI and config file
{[]string{"check1", "check2"}, 2, false}, // non-duplicate []string{},
{[]string{"check1", "check2", "check1"}, 2, false}, // duplicates []string{},
{[]string{"check1", "check2", "all"}, 3, true}, // non-duplicate, but 'all' present together wth individual checks sets.NewString(),
{[]string{"all"}, 1, false}, // skip all checks by using new flag false,
{[]string{"all"}, 1, false}, // skip all checks by using both old and new flags at the same time },
{ // empty list in CLI only
[]string{},
[]string{"a"},
sets.NewString("a"),
false,
},
{ // empty list in config file only
[]string{"a"},
[]string{},
sets.NewString("a"),
false,
},
{ // no duplicates, no overlap
[]string{"a", "b"},
[]string{"c", "d"},
sets.NewString("a", "b", "c", "d"),
false,
},
{ // some duplicates, with some overlapping duplicates
[]string{"a", "b", "a"},
[]string{"c", "b"},
sets.NewString("a", "b", "c"),
false,
},
{ // non-duplicate, but 'all' present together with individual checks in CLI
[]string{"a", "b", "all"},
[]string{},
sets.NewString(),
true,
},
{ // empty list in CLI, but 'all' present in config file, which is forbidden
[]string{},
[]string{"all"},
sets.NewString(),
true,
},
{ // non-duplicate, but 'all' present in config file, which is forbidden
[]string{"a", "b"},
[]string{"all"},
sets.NewString(),
true,
},
{ // non-duplicate, but 'all' present in CLI, while values are in config file, which is forbidden
[]string{"all"},
[]string{"a", "b"},
sets.NewString(),
true,
},
{ // skip all checks
[]string{"all"},
[]string{},
sets.NewString("all"),
false,
},
} }
for _, rt := range tests { for _, rt := range tests {
result, err := ValidateIgnorePreflightErrors(rt.ignorePreflightErrors) result, err := ValidateIgnorePreflightErrors(rt.ignorePreflightErrorsFromCLI, rt.ignorePreflightErrorsFromConfigFile)
switch { switch {
case err != nil && !rt.expectedError: case err != nil && !rt.expectedError:
t.Errorf("ValidateIgnorePreflightErrors: unexpected error for input (%s), error: %v", rt.ignorePreflightErrors, err) t.Errorf("ValidateIgnorePreflightErrors: unexpected error for input (%s, %s), error: %v", rt.ignorePreflightErrorsFromCLI, rt.ignorePreflightErrorsFromConfigFile, err)
case err == nil && rt.expectedError: case err == nil && rt.expectedError:
t.Errorf("ValidateIgnorePreflightErrors: expected error for input (%s) but got: %v", rt.ignorePreflightErrors, result) t.Errorf("ValidateIgnorePreflightErrors: expected error for input (%s, %s) but got: %v", rt.ignorePreflightErrorsFromCLI, rt.ignorePreflightErrorsFromConfigFile, result)
case result.Len() != rt.expectedLen: case err == nil && !result.Equal(rt.expectedSet):
t.Errorf("ValidateIgnorePreflightErrors: expected Len = %d for input (%s) but got: %v, %v", rt.expectedLen, rt.ignorePreflightErrors, result.Len(), result) t.Errorf("ValidateIgnorePreflightErrors: expected (%v) for input (%s, %s) but got: %v", rt.expectedSet, rt.ignorePreflightErrorsFromCLI, rt.ignorePreflightErrorsFromConfigFile, result)
} }
} }
} }


@ -567,6 +567,11 @@ func (in *NodeRegistrationOptions) DeepCopyInto(out *NodeRegistrationOptions) {
(*out)[key] = val (*out)[key] = val
} }
} }
if in.IgnorePreflightErrors != nil {
in, out := &in.IgnorePreflightErrors, &out.IgnorePreflightErrors
*out = make([]string, len(*in))
copy(*out, *in)
}
return return
} }


@ -99,6 +99,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",


@ -103,7 +103,7 @@ type initOptions struct {
// compile-time assert that the local data object satisfies the phases data interface. // compile-time assert that the local data object satisfies the phases data interface.
var _ phases.InitData = &initData{} var _ phases.InitData = &initData{}
// initData defines all the runtime information used when running the kubeadm init worklow; // initData defines all the runtime information used when running the kubeadm init workflow;
// this data is shared across all the phases that are included in the workflow. // this data is shared across all the phases that are included in the workflow.
type initData struct { type initData struct {
cfg *kubeadmapi.InitConfiguration cfg *kubeadmapi.InitConfiguration
@ -296,11 +296,6 @@ func newInitData(cmd *cobra.Command, args []string, options *initOptions, out io
return nil, err return nil, err
} }
ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(options.ignorePreflightErrors)
if err != nil {
return nil, err
}
if err = validation.ValidateMixedArguments(cmd.Flags()); err != nil { if err = validation.ValidateMixedArguments(cmd.Flags()); err != nil {
return nil, err return nil, err
} }
@ -316,6 +311,13 @@ func newInitData(cmd *cobra.Command, args []string, options *initOptions, out io
return nil, err return nil, err
} }
ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(options.ignorePreflightErrors, cfg.NodeRegistration.IgnorePreflightErrors)
if err != nil {
return nil, err
}
// Also set the union of pre-flight errors to InitConfiguration, to provide a consistent view of the runtime configuration:
cfg.NodeRegistration.IgnorePreflightErrors = ignorePreflightErrorsSet.List()
// override node name and CRI socket from the command line options // override node name and CRI socket from the command line options
if options.externalcfg.NodeRegistration.Name != "" { if options.externalcfg.NodeRegistration.Name != "" {
cfg.NodeRegistration.Name = options.externalcfg.NodeRegistration.Name cfg.NodeRegistration.Name = options.externalcfg.NodeRegistration.Name

View File

@ -23,6 +23,7 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
"k8s.io/kubernetes/cmd/kubeadm/app/features" "k8s.io/kubernetes/cmd/kubeadm/app/features"
) )
@ -38,6 +39,9 @@ bootstrapTokens:
nodeRegistration: nodeRegistration:
criSocket: /run/containerd/containerd.sock criSocket: /run/containerd/containerd.sock
name: someName name: someName
ignorePreflightErrors:
- c
- d
--- ---
apiVersion: kubeadm.k8s.io/v1beta2 apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration kind: ClusterConfiguration
@ -129,6 +133,30 @@ func TestNewInitData(t *testing.T) {
}, },
expectError: true, expectError: true,
}, },
// Pre-flight errors:
{
name: "pre-flights errors from CLI args only",
flags: map[string]string{
options.IgnorePreflightErrors: "a,b",
},
validate: expectedInitIgnorePreflightErrors("a", "b"),
},
{
name: "pre-flights errors from InitConfiguration only",
flags: map[string]string{
options.CfgPath: configFilePath,
},
validate: expectedInitIgnorePreflightErrors("c", "d"),
},
{
name: "pre-flights errors from both CLI args and InitConfiguration",
flags: map[string]string{
options.CfgPath: configFilePath,
options.IgnorePreflightErrors: "a,b",
},
validate: expectedInitIgnorePreflightErrors("a", "b", "c", "d"),
},
} }
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
@ -157,3 +185,15 @@ func TestNewInitData(t *testing.T) {
}) })
} }
} }
func expectedInitIgnorePreflightErrors(expectedItems ...string) func(t *testing.T, data *initData) {
expected := sets.NewString(expectedItems...)
return func(t *testing.T, data *initData) {
if !expected.Equal(data.ignorePreflightErrors) {
t.Errorf("Invalid ignore preflight errors. Expected: %v. Actual: %v", expected.List(), data.ignorePreflightErrors.List())
}
if !expected.HasAll(data.cfg.NodeRegistration.IgnorePreflightErrors...) {
t.Errorf("Invalid ignore preflight errors in InitConfiguration. Expected: %v. Actual: %v", expected.List(), data.cfg.NodeRegistration.IgnorePreflightErrors)
}
}
}
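
The validator helpers above are built on the apimachinery string set type; as a quick reference, this standalone sketch shows the three operations they rely on: Equal for order-insensitive comparison, HasAll for the subset check against NodeRegistration, and List for the sorted slice written back to the configuration.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    expected := sets.NewString("a", "b", "c", "d")
    actual := sets.NewString("d", "c", "b", "a")

    fmt.Println(expected.Equal(actual))    // true: order does not matter
    fmt.Println(expected.HasAll("a", "c")) // true: subset check used against NodeRegistration
    fmt.Println(expected.List())           // [a b c d]: sorted slice written back to the config
}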

View File

@ -133,7 +133,7 @@ type joinOptions struct {
// compile-time assert that the local data object satisfies the phases data interface. // compile-time assert that the local data object satisfies the phases data interface.
var _ phases.JoinData = &joinData{} var _ phases.JoinData = &joinData{}
// joinData defines all the runtime information used when running the kubeadm join worklow; // joinData defines all the runtime information used when running the kubeadm join workflow;
// this data is shared across all the phases that are included in the workflow. // this data is shared across all the phases that are included in the workflow.
type joinData struct { type joinData struct {
cfg *kubeadmapi.JoinConfiguration cfg *kubeadmapi.JoinConfiguration
@ -349,12 +349,7 @@ func newJoinData(cmd *cobra.Command, args []string, opt *joinOptions, out io.Wri
} }
} }
ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(opt.ignorePreflightErrors) if err := validation.ValidateMixedArguments(cmd.Flags()); err != nil {
if err != nil {
return nil, err
}
if err = validation.ValidateMixedArguments(cmd.Flags()); err != nil {
return nil, err return nil, err
} }
@ -383,6 +378,13 @@ func newJoinData(cmd *cobra.Command, args []string, opt *joinOptions, out io.Wri
return nil, err return nil, err
} }
ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(opt.ignorePreflightErrors, cfg.NodeRegistration.IgnorePreflightErrors)
if err != nil {
return nil, err
}
// Also set the union of pre-flight errors to JoinConfiguration, to provide a consistent view of the runtime configuration:
cfg.NodeRegistration.IgnorePreflightErrors = ignorePreflightErrorsSet.List()
// override node name and CRI socket from the command line opt // override node name and CRI socket from the command line opt
if opt.externalcfg.NodeRegistration.Name != "" { if opt.externalcfg.NodeRegistration.Name != "" {
cfg.NodeRegistration.Name = opt.externalcfg.NodeRegistration.Name cfg.NodeRegistration.Name = opt.externalcfg.NodeRegistration.Name

View File

@ -22,6 +22,7 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
) )
@ -36,6 +37,9 @@ discovery:
nodeRegistration: nodeRegistration:
criSocket: /run/containerd/containerd.sock criSocket: /run/containerd/containerd.sock
name: someName name: someName
ignorePreflightErrors:
- c
- d
` `
) )
@ -215,6 +219,31 @@ func TestNewJoinData(t *testing.T) {
}, },
expectError: true, expectError: true,
}, },
// Pre-flight errors:
{
name: "pre-flights errors from CLI args only",
flags: map[string]string{
options.IgnorePreflightErrors: "a,b",
options.FileDiscovery: "https://foo", //required only to pass discovery validation
},
validate: expectedJoinIgnorePreflightErrors(sets.NewString("a", "b")),
},
{
name: "pre-flights errors from JoinConfiguration only",
flags: map[string]string{
options.CfgPath: configFilePath,
},
validate: expectedJoinIgnorePreflightErrors(sets.NewString("c", "d")),
},
{
name: "pre-flights errors from both CLI args and JoinConfiguration",
flags: map[string]string{
options.CfgPath: configFilePath,
options.IgnorePreflightErrors: "a,b",
},
validate: expectedJoinIgnorePreflightErrors(sets.NewString("a", "b", "c", "d")),
},
} }
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
@ -243,3 +272,14 @@ func TestNewJoinData(t *testing.T) {
}) })
} }
} }
func expectedJoinIgnorePreflightErrors(expected sets.String) func(t *testing.T, data *joinData) {
return func(t *testing.T, data *joinData) {
if !expected.Equal(data.ignorePreflightErrors) {
t.Errorf("Invalid ignore preflight errors. Expected: %v. Actual: %v", expected.List(), data.ignorePreflightErrors.List())
}
if !expected.HasAll(data.cfg.NodeRegistration.IgnorePreflightErrors...) {
t.Errorf("Invalid ignore preflight errors in JoinConfiguration. Expected: %v. Actual: %v", expected.List(), data.cfg.NodeRegistration.IgnorePreflightErrors)
}
}
}

View File

@ -59,7 +59,7 @@ type resetOptions struct {
kubeconfigPath string kubeconfigPath string
} }
// resetData defines all the runtime information used when running the kubeadm reset worklow; // resetData defines all the runtime information used when running the kubeadm reset workflow;
// this data is shared across all the phases that are included in the workflow. // this data is shared across all the phases that are included in the workflow.
type resetData struct { type resetData struct {
certificatesDir string certificatesDir string
@ -84,10 +84,6 @@ func newResetOptions() *resetOptions {
// newResetData returns a new resetData struct to be used for the execution of the kubeadm reset workflow. // newResetData returns a new resetData struct to be used for the execution of the kubeadm reset workflow.
func newResetData(cmd *cobra.Command, options *resetOptions, in io.Reader, out io.Writer) (*resetData, error) { func newResetData(cmd *cobra.Command, options *resetOptions, in io.Reader, out io.Writer) (*resetData, error) {
var cfg *kubeadmapi.InitConfiguration var cfg *kubeadmapi.InitConfiguration
ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(options.ignorePreflightErrors)
if err != nil {
return nil, err
}
client, err := getClientset(options.kubeconfigPath, false) client, err := getClientset(options.kubeconfigPath, false)
if err == nil { if err == nil {
@ -100,6 +96,16 @@ func newResetData(cmd *cobra.Command, options *resetOptions, in io.Reader, out i
klog.V(1).Infof("[reset] Could not obtain a client set from the kubeconfig file: %s", options.kubeconfigPath) klog.V(1).Infof("[reset] Could not obtain a client set from the kubeconfig file: %s", options.kubeconfigPath)
} }
ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(options.ignorePreflightErrors, ignorePreflightErrors(cfg))
if err != nil {
return nil, err
}
kubeadmutil.CheckErr(err)
if cfg != nil {
// Also set the union of pre-flight errors to InitConfiguration, to provide a consistent view of the runtime configuration:
cfg.NodeRegistration.IgnorePreflightErrors = ignorePreflightErrorsSet.List()
}
var criSocketPath string var criSocketPath string
if options.criSocketPath == "" { if options.criSocketPath == "" {
criSocketPath, err = resetDetectCRISocket(cfg) criSocketPath, err = resetDetectCRISocket(cfg)
@ -121,6 +127,13 @@ func newResetData(cmd *cobra.Command, options *resetOptions, in io.Reader, out i
}, nil }, nil
} }
func ignorePreflightErrors(cfg *kubeadmapi.InitConfiguration) []string {
if cfg == nil {
return []string{}
}
return cfg.NodeRegistration.IgnorePreflightErrors
}
// AddResetFlags adds reset flags // AddResetFlags adds reset flags
func AddResetFlags(flagSet *flag.FlagSet, resetOptions *resetOptions) { func AddResetFlags(flagSet *flag.FlagSet, resetOptions *resetOptions) {
flagSet.StringVar( flagSet.StringVar(

View File

@ -76,17 +76,6 @@ func getK8sVersionFromUserInput(flags *applyPlanFlags, args []string, versionIsM
// enforceRequirements verifies that it's okay to upgrade and then returns the variables needed for the rest of the procedure // enforceRequirements verifies that it's okay to upgrade and then returns the variables needed for the rest of the procedure
func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion string) (clientset.Interface, upgrade.VersionGetter, *kubeadmapi.InitConfiguration, error) { func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion string) (clientset.Interface, upgrade.VersionGetter, *kubeadmapi.InitConfiguration, error) {
ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(flags.ignorePreflightErrors)
if err != nil {
return nil, nil, nil, err
}
// Ensure the user is root
klog.V(1).Info("running preflight checks")
if err := runPreflightChecks(ignorePreflightErrorsSet); err != nil {
return nil, nil, nil, err
}
client, err := getClient(flags.kubeConfigPath, dryRun) client, err := getClient(flags.kubeConfigPath, dryRun)
if err != nil { if err != nil {
return nil, nil, nil, errors.Wrapf(err, "couldn't create a Kubernetes client from file %q", flags.kubeConfigPath) return nil, nil, nil, errors.Wrapf(err, "couldn't create a Kubernetes client from file %q", flags.kubeConfigPath)
@ -97,11 +86,6 @@ func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion strin
return nil, nil, nil, errors.New("cannot upgrade a self-hosted control plane") return nil, nil, nil, errors.New("cannot upgrade a self-hosted control plane")
} }
// Run healthchecks against the cluster
if err := upgrade.CheckClusterHealth(client, ignorePreflightErrorsSet); err != nil {
return nil, nil, nil, errors.Wrap(err, "[upgrade/health] FATAL")
}
// Fetch the configuration from a file or ConfigMap and validate it // Fetch the configuration from a file or ConfigMap and validate it
fmt.Println("[upgrade/config] Making sure the configuration is correct:") fmt.Println("[upgrade/config] Making sure the configuration is correct:")
@ -112,6 +96,24 @@ func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion strin
cfg, err = configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "upgrade/config", false) cfg, err = configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "upgrade/config", false)
} }
ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(flags.ignorePreflightErrors, cfg.NodeRegistration.IgnorePreflightErrors)
if err != nil {
return nil, nil, nil, err
}
// Also set the union of pre-flight errors to InitConfiguration, to provide a consistent view of the runtime configuration:
cfg.NodeRegistration.IgnorePreflightErrors = ignorePreflightErrorsSet.List()
// Ensure the user is root
klog.V(1).Info("running preflight checks")
if err := runPreflightChecks(ignorePreflightErrorsSet); err != nil {
return nil, nil, nil, err
}
// Run healthchecks against the cluster
if err := upgrade.CheckClusterHealth(client, ignorePreflightErrorsSet); err != nil {
return nil, nil, nil, errors.Wrap(err, "[upgrade/health] FATAL")
}
if err != nil { if err != nil {
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
fmt.Printf("[upgrade/config] In order to upgrade, a ConfigMap called %q in the %s namespace must exist.\n", constants.KubeadmConfigConfigMap, metav1.NamespaceSystem) fmt.Printf("[upgrade/config] In order to upgrade, a ConfigMap called %q in the %s namespace must exist.\n", constants.KubeadmConfigConfigMap, metav1.NamespaceSystem)

View File

@ -20,5 +20,5 @@ package constants
const ( const (
// DefaultDockerCRISocket defines the default Docker CRI socket // DefaultDockerCRISocket defines the default Docker CRI socket
DefaultDockerCRISocket = "tcp://localhost:2375" DefaultDockerCRISocket = "npipe:////./pipe/docker_engine"
) )

View File

@ -15,6 +15,7 @@ Discovery:
Timeout: 5m0s Timeout: 5m0s
NodeRegistration: NodeRegistration:
CRISocket: /var/run/dockershim.sock CRISocket: /var/run/dockershim.sock
IgnorePreflightErrors: null
KubeletExtraArgs: null KubeletExtraArgs: null
Name: control-plane-1 Name: control-plane-1
Taints: Taints:

View File

@ -2,7 +2,11 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library( go_library(
name = "go_default_library", name = "go_default_library",
srcs = ["runtime.go"], srcs = [
"runtime.go",
"runtime_unix.go",
"runtime_windows.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util/runtime", importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util/runtime",
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
@ -10,7 +14,12 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library",
], ] + select({
"@io_bazel_rules_go//go/platform:windows": [
"//vendor/github.com/Microsoft/go-winio:go_default_library",
],
"//conditions:default": [],
}),
) )
go_test( go_test(

View File

@ -17,7 +17,6 @@ limitations under the License.
package util package util
import ( import (
"os"
"path/filepath" "path/filepath"
goruntime "runtime" goruntime "runtime"
"strings" "strings"
@ -180,23 +179,8 @@ func (runtime *DockerRuntime) ImageExists(image string) (bool, error) {
return err == nil, nil return err == nil, nil
} }
// isExistingSocket checks if path exists and is domain socket
func isExistingSocket(path string) bool {
fileInfo, err := os.Stat(path)
if err != nil {
return false
}
return fileInfo.Mode()&os.ModeSocket != 0
}
// detectCRISocketImpl is separated out only for test purposes, DON'T call it directly, use DetectCRISocket instead // detectCRISocketImpl is separated out only for test purposes, DON'T call it directly, use DetectCRISocket instead
func detectCRISocketImpl(isSocket func(string) bool) (string, error) { func detectCRISocketImpl(isSocket func(string) bool) (string, error) {
const (
dockerSocket = "/var/run/docker.sock" // The Docker socket is not CRI compatible
containerdSocket = "/run/containerd/containerd.sock"
)
foundCRISockets := []string{} foundCRISockets := []string{}
knownCRISockets := []string{ knownCRISockets := []string{
// Docker and containerd sockets are special cased below, hence not to be included here // Docker and containerd sockets are special cased below, hence not to be included here
@ -233,9 +217,5 @@ func detectCRISocketImpl(isSocket func(string) bool) (string, error) {
// DetectCRISocket uses a list of known CRI sockets to detect one. If more than one or none is discovered, an error is returned. // DetectCRISocket uses a list of known CRI sockets to detect one. If more than one or none is discovered, an error is returned.
func DetectCRISocket() (string, error) { func DetectCRISocket() (string, error) {
if goruntime.GOOS != "linux" {
return constants.DefaultDockerCRISocket, nil
}
return detectCRISocketImpl(isExistingSocket) return detectCRISocketImpl(isExistingSocket)
} }

View File

@ -0,0 +1,38 @@
// +build !windows
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"os"
)
const (
dockerSocket = "/var/run/docker.sock" // The Docker socket is not CRI compatible
containerdSocket = "/run/containerd/containerd.sock"
)
// isExistingSocket checks if path exists and is domain socket
func isExistingSocket(path string) bool {
fileInfo, err := os.Stat(path)
if err != nil {
return false
}
return fileInfo.Mode()&os.ModeSocket != 0
}

View File

@ -0,0 +1,38 @@
// +build windows
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
winio "github.com/Microsoft/go-winio"
)
const (
dockerSocket = "//./pipe/docker_engine" // The Docker socket is not CRI compatible
containerdSocket = "//./pipe/containerd-containerd" // Proposed containerd named pipe for Windows
)
// isExistingSocket checks if path exists and is domain socket
func isExistingSocket(path string) bool {
_, err := winio.DialPipe(path, nil)
if err != nil {
return false
}
return true
}
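
Taken together, the two build-tagged files give detectCRISocketImpl a single isExistingSocket helper per platform. Since those helpers are unexported, the sketch below simply restates the Unix variant as a standalone program to show the check involved; on Windows the diff dials the named pipe with go-winio instead.

package main

import (
    "fmt"
    "os"
)

// isUnixSocket mirrors the !windows implementation above: a path counts as a
// CRI endpoint candidate only if it exists and is a Unix domain socket.
func isUnixSocket(path string) bool {
    fileInfo, err := os.Stat(path)
    if err != nil {
        return false
    }
    return fileInfo.Mode()&os.ModeSocket != 0
}

func main() {
    for _, p := range []string{"/var/run/docker.sock", "/run/containerd/containerd.sock"} {
        fmt.Printf("%s: socket=%v\n", p, isUnixSocket(p))
    }
}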

View File

@ -135,6 +135,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/client-go/util/certificate:go_default_library", "//staging/src/k8s.io/client-go/util/certificate:go_default_library",
"//staging/src/k8s.io/client-go/util/connrotation:go_default_library",
"//staging/src/k8s.io/client-go/util/keyutil:go_default_library", "//staging/src/k8s.io/client-go/util/keyutil:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/component-base/cli/flag:go_default_library", "//staging/src/k8s.io/component-base/cli/flag:go_default_library",

View File

@ -39,7 +39,6 @@ import (
kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme" kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme"
kubeletconfigvalidation "k8s.io/kubernetes/pkg/kubelet/apis/config/validation" kubeletconfigvalidation "k8s.io/kubernetes/pkg/kubelet/apis/config/validation"
"k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/config"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/master/ports"
utilflag "k8s.io/kubernetes/pkg/util/flag" utilflag "k8s.io/kubernetes/pkg/util/flag"
utiltaints "k8s.io/kubernetes/pkg/util/taints" utiltaints "k8s.io/kubernetes/pkg/util/taints"
@ -195,19 +194,6 @@ type KubeletFlags struct {
// This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node. // This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node.
// This can be useful for debugging volume related issues. // This can be useful for debugging volume related issues.
KeepTerminatedPodVolumes bool KeepTerminatedPodVolumes bool
// allowPrivileged enables containers to request privileged mode.
// Defaults to true.
AllowPrivileged bool
// hostNetworkSources is a comma-separated list of sources from which the
// Kubelet allows pods to use of host network. Defaults to "*". Valid
// options are "file", "http", "api", and "*" (all sources).
HostNetworkSources []string
// hostPIDSources is a comma-separated list of sources from which the
// Kubelet allows pods to use the host pid namespace. Defaults to "*".
HostPIDSources []string
// hostIPCSources is a comma-separated list of sources from which the
// Kubelet allows pods to use the host ipc namespace. Defaults to "*".
HostIPCSources []string
} }
// NewKubeletFlags will create a new KubeletFlags with default values // NewKubeletFlags will create a new KubeletFlags with default values
@ -236,11 +222,6 @@ func NewKubeletFlags() *KubeletFlags {
VolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/", VolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
RegisterNode: true, RegisterNode: true,
SeccompProfileRoot: filepath.Join(defaultRootDir, "seccomp"), SeccompProfileRoot: filepath.Join(defaultRootDir, "seccomp"),
HostNetworkSources: []string{kubetypes.AllSource},
HostPIDSources: []string{kubetypes.AllSource},
HostIPCSources: []string{kubetypes.AllSource},
// TODO(#58010:v1.13.0): Remove --allow-privileged, it is deprecated
AllowPrivileged: true,
// prior to the introduction of this flag, there was a hardcoded cap of 50 images // prior to the introduction of this flag, there was a hardcoded cap of 50 images
NodeStatusMaxImages: 50, NodeStatusMaxImages: 50,
} }
@ -443,18 +424,6 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) {
fs.MarkDeprecated("non-masquerade-cidr", "will be removed in a future version") fs.MarkDeprecated("non-masquerade-cidr", "will be removed in a future version")
fs.BoolVar(&f.KeepTerminatedPodVolumes, "keep-terminated-pod-volumes", f.KeepTerminatedPodVolumes, "Keep terminated pod volumes mounted to the node after the pod terminates. Can be useful for debugging volume related issues.") fs.BoolVar(&f.KeepTerminatedPodVolumes, "keep-terminated-pod-volumes", f.KeepTerminatedPodVolumes, "Keep terminated pod volumes mounted to the node after the pod terminates. Can be useful for debugging volume related issues.")
fs.MarkDeprecated("keep-terminated-pod-volumes", "will be removed in a future version") fs.MarkDeprecated("keep-terminated-pod-volumes", "will be removed in a future version")
// TODO(#58010:v1.13.0): Remove --allow-privileged, it is deprecated
fs.BoolVar(&f.AllowPrivileged, "allow-privileged", f.AllowPrivileged, "If true, allow containers to request privileged mode. Default: true")
fs.MarkDeprecated("allow-privileged", "will be removed in a future version")
// TODO(#58010:v1.12.0): Remove --host-network-sources, it is deprecated
fs.StringSliceVar(&f.HostNetworkSources, "host-network-sources", f.HostNetworkSources, "Comma-separated list of sources from which the Kubelet allows pods to use of host network.")
fs.MarkDeprecated("host-network-sources", "will be removed in a future version")
// TODO(#58010:v1.12.0): Remove --host-pid-sources, it is deprecated
fs.StringSliceVar(&f.HostPIDSources, "host-pid-sources", f.HostPIDSources, "Comma-separated list of sources from which the Kubelet allows pods to use the host pid namespace.")
fs.MarkDeprecated("host-pid-sources", "will be removed in a future version")
// TODO(#58010:v1.12.0): Remove --host-ipc-sources, it is deprecated
fs.StringSliceVar(&f.HostIPCSources, "host-ipc-sources", f.HostIPCSources, "Comma-separated list of sources from which the Kubelet allows pods to use the host ipc namespace.")
fs.MarkDeprecated("host-ipc-sources", "will be removed in a future version")
} }

View File

@ -57,6 +57,7 @@ import (
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
certutil "k8s.io/client-go/util/cert" certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/certificate" "k8s.io/client-go/util/certificate"
"k8s.io/client-go/util/connrotation"
"k8s.io/client-go/util/keyutil" "k8s.io/client-go/util/keyutil"
cloudprovider "k8s.io/cloud-provider" cloudprovider "k8s.io/cloud-provider"
cliflag "k8s.io/component-base/cli/flag" cliflag "k8s.io/component-base/cli/flag"
@ -567,6 +568,9 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
if err != nil { if err != nil {
return err return err
} }
if closeAllConns == nil {
return errors.New("closeAllConns must be a valid function other than nil")
}
kubeDeps.OnHeartbeatFailure = closeAllConns kubeDeps.OnHeartbeatFailure = closeAllConns
kubeDeps.KubeClient, err = clientset.NewForConfig(clientConfig) kubeDeps.KubeClient, err = clientset.NewForConfig(clientConfig)
@ -806,8 +810,21 @@ func buildKubeletClientConfig(s *options.KubeletServer, nodeName types.NodeName)
} }
kubeClientConfigOverrides(s, clientConfig) kubeClientConfigOverrides(s, clientConfig)
closeAllConns, err := updateDialer(clientConfig)
if err != nil {
return nil, nil, err
}
return clientConfig, closeAllConns, nil
}
return clientConfig, nil, nil // updateDialer instruments a restconfig with a dial. the returned function allows forcefully closing all active connections.
func updateDialer(clientConfig *restclient.Config) (func(), error) {
if clientConfig.Transport != nil || clientConfig.Dial != nil {
return nil, fmt.Errorf("there is already a transport or dialer configured")
}
d := connrotation.NewDialer((&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext)
clientConfig.Dial = d.DialContext
return d.CloseAll, nil
} }
// buildClientCertificateManager creates a certificate manager that will use certConfig to request a client certificate // buildClientCertificateManager creates a certificate manager that will use certConfig to request a client certificate
@ -974,32 +991,9 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
// Setup event recorder if required. // Setup event recorder if required.
makeEventRecorder(kubeDeps, nodeName) makeEventRecorder(kubeDeps, nodeName)
// TODO(mtaufen): I moved the validation of these fields here, from UnsecuredKubeletConfig, capabilities.Initialize(capabilities.Capabilities{
// so that I could remove the associated fields from KubeletConfiginternal. I would AllowPrivileged: true,
// prefer this to be done as part of an independent validation step on the })
// KubeletConfiguration. But as far as I can tell, we don't have an explicit
// place for validation of the KubeletConfiguration yet.
hostNetworkSources, err := kubetypes.GetValidatedSources(kubeServer.HostNetworkSources)
if err != nil {
return err
}
hostPIDSources, err := kubetypes.GetValidatedSources(kubeServer.HostPIDSources)
if err != nil {
return err
}
hostIPCSources, err := kubetypes.GetValidatedSources(kubeServer.HostIPCSources)
if err != nil {
return err
}
privilegedSources := capabilities.PrivilegedSources{
HostNetworkSources: hostNetworkSources,
HostPIDSources: hostPIDSources,
HostIPCSources: hostIPCSources,
}
capabilities.Setup(kubeServer.AllowPrivileged, privilegedSources, 0)
credentialprovider.SetPreferredDockercfgPath(kubeServer.RootDirectory) credentialprovider.SetPreferredDockercfgPath(kubeServer.RootDirectory)
klog.V(2).Infof("Using root directory: %v", kubeServer.RootDirectory) klog.V(2).Infof("Using root directory: %v", kubeServer.RootDirectory)
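
As a standalone illustration of the connrotation wiring added above (assuming only the client-go packages referenced in this change), the sketch below instruments a rest.Config the same way updateDialer does and exposes the CloseAll handle that the kubelet stores as OnHeartbeatFailure; the host value is a placeholder.

package main

import (
    "net"
    "time"

    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/util/connrotation"
)

// newRotatableConfig mirrors updateDialer: it instruments a rest.Config with a
// connection-tracking dialer and returns a function that force-closes every
// connection opened through it.
func newRotatableConfig(cfg *restclient.Config) func() {
    d := connrotation.NewDialer((&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext)
    cfg.Dial = d.DialContext
    return d.CloseAll
}

func main() {
    cfg := &restclient.Config{Host: "https://127.0.0.1:6443"} // placeholder endpoint
    closeAllConns := newRotatableConfig(cfg)

    // On a heartbeat failure the kubelet invokes this to force new TCP
    // connections (and fresh endpoint resolution) on the next request.
    closeAllConns()
}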

go.mod
View File

@ -178,7 +178,7 @@ require (
k8s.io/csi-translation-lib v0.0.0 k8s.io/csi-translation-lib v0.0.0
k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af
k8s.io/heapster v1.2.0-beta.1 k8s.io/heapster v1.2.0-beta.1
k8s.io/klog v0.3.0 k8s.io/klog v0.3.1
k8s.io/kube-aggregator v0.0.0 k8s.io/kube-aggregator v0.0.0
k8s.io/kube-controller-manager v0.0.0 k8s.io/kube-controller-manager v0.0.0
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30
@ -442,7 +442,7 @@ replace (
k8s.io/csi-translation-lib => ./staging/src/k8s.io/csi-translation-lib k8s.io/csi-translation-lib => ./staging/src/k8s.io/csi-translation-lib
k8s.io/gengo => k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af k8s.io/gengo => k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af
k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1 k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1
k8s.io/klog => k8s.io/klog v0.3.0 k8s.io/klog => k8s.io/klog v0.3.1
k8s.io/kube-aggregator => ./staging/src/k8s.io/kube-aggregator k8s.io/kube-aggregator => ./staging/src/k8s.io/kube-aggregator
k8s.io/kube-controller-manager => ./staging/src/k8s.io/kube-controller-manager k8s.io/kube-controller-manager => ./staging/src/k8s.io/kube-controller-manager
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30

go.sum
View File

@ -462,8 +462,8 @@ k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8
k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/heapster v1.2.0-beta.1 h1:lUsE/AHOMHpi3MLlBEkaU8Esxm5QhdyCrv1o7ot0s84= k8s.io/heapster v1.2.0-beta.1 h1:lUsE/AHOMHpi3MLlBEkaU8Esxm5QhdyCrv1o7ot0s84=
k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM=
k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/repo-infra v0.0.0-20181204233714-00fe14e3d1a3 h1:WD6cPA3q7qxZe6Fwir0XjjGwGMaWbHlHUcjCcOzuRG0= k8s.io/repo-infra v0.0.0-20181204233714-00fe14e3d1a3 h1:WD6cPA3q7qxZe6Fwir0XjjGwGMaWbHlHUcjCcOzuRG0=

View File

@ -537,7 +537,6 @@ staging/src/k8s.io/client-go/scale/scheme/autoscalingv1
staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1 staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1
staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1 staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1
staging/src/k8s.io/client-go/testing staging/src/k8s.io/client-go/testing
staging/src/k8s.io/client-go/tools/auth
staging/src/k8s.io/client-go/tools/cache staging/src/k8s.io/client-go/tools/cache
staging/src/k8s.io/client-go/tools/cache/testing staging/src/k8s.io/client-go/tools/cache/testing
staging/src/k8s.io/client-go/tools/clientcmd staging/src/k8s.io/client-go/tools/clientcmd
@ -546,7 +545,6 @@ staging/src/k8s.io/client-go/tools/clientcmd/api/latest
staging/src/k8s.io/client-go/tools/clientcmd/api/v1 staging/src/k8s.io/client-go/tools/clientcmd/api/v1
staging/src/k8s.io/client-go/tools/leaderelection staging/src/k8s.io/client-go/tools/leaderelection
staging/src/k8s.io/client-go/tools/leaderelection/resourcelock staging/src/k8s.io/client-go/tools/leaderelection/resourcelock
staging/src/k8s.io/client-go/tools/portforward
staging/src/k8s.io/client-go/tools/record staging/src/k8s.io/client-go/tools/record
staging/src/k8s.io/client-go/tools/reference staging/src/k8s.io/client-go/tools/reference
staging/src/k8s.io/client-go/transport staging/src/k8s.io/client-go/transport
@ -590,12 +588,10 @@ staging/src/k8s.io/sample-apiserver/pkg/apis/wardle
staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1 staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
test/e2e/autoscaling
test/e2e/chaosmonkey test/e2e/chaosmonkey
test/e2e/common test/e2e/common
test/e2e/lifecycle/bootstrap test/e2e/lifecycle/bootstrap
test/e2e/scalability test/e2e/scalability
test/e2e/storage/drivers
test/e2e/storage/testsuites test/e2e/storage/testsuites
test/e2e/storage/vsphere test/e2e/storage/vsphere
test/e2e_kubeadm test/e2e_kubeadm

View File

@ -65,6 +65,7 @@ ENABLE_CLUSTER_DNS=${KUBE_ENABLE_CLUSTER_DNS:-true}
ENABLE_NODELOCAL_DNS=${KUBE_ENABLE_NODELOCAL_DNS:-false} ENABLE_NODELOCAL_DNS=${KUBE_ENABLE_NODELOCAL_DNS:-false}
DNS_SERVER_IP=${KUBE_DNS_SERVER_IP:-10.0.0.10} DNS_SERVER_IP=${KUBE_DNS_SERVER_IP:-10.0.0.10}
LOCAL_DNS_IP=${KUBE_LOCAL_DNS_IP:-169.254.20.10} LOCAL_DNS_IP=${KUBE_LOCAL_DNS_IP:-169.254.20.10}
DNS_MEMORY_LIMIT=${KUBE_DNS_MEMORY_LIMIT:-170Mi}
DNS_DOMAIN=${KUBE_DNS_NAME:-"cluster.local"} DNS_DOMAIN=${KUBE_DNS_NAME:-"cluster.local"}
KUBECTL=${KUBECTL:-"${KUBE_ROOT}/cluster/kubectl.sh"} KUBECTL=${KUBECTL:-"${KUBE_ROOT}/cluster/kubectl.sh"}
WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-60} WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-60}
@ -686,11 +687,6 @@ function start_kubelet {
KUBELET_LOG=${LOG_DIR}/kubelet.log KUBELET_LOG=${LOG_DIR}/kubelet.log
mkdir -p "${POD_MANIFEST_PATH}" &>/dev/null || sudo mkdir -p "${POD_MANIFEST_PATH}" mkdir -p "${POD_MANIFEST_PATH}" &>/dev/null || sudo mkdir -p "${POD_MANIFEST_PATH}"
priv_arg=""
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
priv_arg="--allow-privileged=${ALLOW_PRIVILEGED}"
fi
cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}") cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
cloud_config_arg=("--cloud-provider=external") cloud_config_arg=("--cloud-provider=external")
@ -751,7 +747,6 @@ function start_kubelet {
# shellcheck disable=SC2206 # shellcheck disable=SC2206
all_kubelet_flags=( all_kubelet_flags=(
${priv_arg:+"$priv_arg"}
"--v=${LOG_LEVEL}" "--v=${LOG_LEVEL}"
"--vmodule=${LOG_SPEC}" "--vmodule=${LOG_SPEC}"
"--chaos-chance=${CHAOS_CHANCE}" "--chaos-chance=${CHAOS_CHANCE}"
@ -849,6 +844,7 @@ function start_kubedns {
cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns/kube-dns.yaml.in" kube-dns.yaml cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns/kube-dns.yaml.in" kube-dns.yaml
${SED} -i -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" kube-dns.yaml ${SED} -i -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" kube-dns.yaml
${SED} -i -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" kube-dns.yaml ${SED} -i -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" kube-dns.yaml
${SED} -i -e "s/{{ pillar\['dns_memory_limit'\] }}/${DNS_MEMORY_LIMIT}/g" kube-dns.yaml
# TODO update to dns role once we have one. # TODO update to dns role once we have one.
# use kubectl to create kubedns addon # use kubectl to create kubedns addon
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kube-dns.yaml ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kube-dns.yaml

View File

@ -29,8 +29,6 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
# Explicitly opt into go modules, even though we're inside a GOPATH directory # Explicitly opt into go modules, even though we're inside a GOPATH directory
export GO111MODULE=on export GO111MODULE=on
# Explicitly clear GOPATH, to ensure nothing this script calls makes use of that path info
export GOPATH=
# Explicitly clear GOFLAGS, since GOFLAGS=-mod=vendor breaks dependency resolution while rebuilding vendor # Explicitly clear GOFLAGS, since GOFLAGS=-mod=vendor breaks dependency resolution while rebuilding vendor
export GOFLAGS= export GOFLAGS=
# Detect problematic GOPROXY settings that prevent lookup of dependencies # Detect problematic GOPROXY settings that prevent lookup of dependencies

View File

@ -23,21 +23,36 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env kube::golang::setup_env
make -C "${KUBE_ROOT}" WHAT=cmd/hyperkube kube::util::ensure-temp-dir
OUTPUT="${KUBE_TEMP}"/symbols-output
cleanup() {
rm -rf "${OUTPUT}"
}
trap "cleanup" EXIT SIGINT
mkdir -p "${OUTPUT}"
GOLDFLAGS="-w" make -C "${KUBE_ROOT}" WHAT=cmd/hyperkube
# Add other BADSYMBOLS here. # Add other BADSYMBOLS here.
BADSYMBOLS=( BADSYMBOLS=(
"httptest" "httptest"
"testify" "testify"
"testing[.]" "testing[.]"
"TestOnlySetFatalOnDecodeError"
"TrackStorageCleanup"
) )
# b/c hyperkube binds everything simply check that for bad symbols # b/c hyperkube binds everything simply check that for bad symbols
SYMBOLS="$(nm "${KUBE_OUTPUT_HOSTBIN}/hyperkube")" go tool nm "${KUBE_OUTPUT_HOSTBIN}/hyperkube" > "${OUTPUT}/hyperkube-symbols"
if ! grep -q "NewHyperKubeCommand" "${OUTPUT}/hyperkube-symbols"; then
echo "No symbols found in hyperkube binary."
exit 1
fi
RESULT=0 RESULT=0
for BADSYMBOL in "${BADSYMBOLS[@]}"; do for BADSYMBOL in "${BADSYMBOLS[@]}"; do
if FOUND=$(echo "$SYMBOLS" | grep "$BADSYMBOL"); then if FOUND=$(grep "${BADSYMBOL}" < "${OUTPUT}/hyperkube-symbols"); then
echo "Found bad symbol '${BADSYMBOL}':" echo "Found bad symbol '${BADSYMBOL}':"
echo "$FOUND" echo "$FOUND"
RESULT=1 RESULT=1

View File

@ -368,6 +368,8 @@ func dropDisabledFields(
dropDisabledRunAsGroupField(podSpec, oldPodSpec) dropDisabledRunAsGroupField(podSpec, oldPodSpec)
dropDisabledGMSAFields(podSpec, oldPodSpec)
if !utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && !runtimeClassInUse(oldPodSpec) { if !utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && !runtimeClassInUse(oldPodSpec) {
// Set RuntimeClassName to nil only if feature is disabled and it is not used // Set RuntimeClassName to nil only if feature is disabled and it is not used
podSpec.RuntimeClassName = nil podSpec.RuntimeClassName = nil
@ -399,6 +401,39 @@ func dropDisabledRunAsGroupField(podSpec, oldPodSpec *api.PodSpec) {
} }
} }
// dropDisabledGMSAFields removes disabled fields related to Windows GMSA
// from the given PodSpec.
func dropDisabledGMSAFields(podSpec, oldPodSpec *api.PodSpec) {
if utilfeature.DefaultFeatureGate.Enabled(features.WindowsGMSA) ||
gMSAFieldsInUse(oldPodSpec) {
return
}
if podSpec.SecurityContext != nil {
dropDisabledGMSAFieldsFromWindowsSecurityOptions(podSpec.SecurityContext.WindowsOptions)
}
dropDisabledGMSAFieldsFromContainers(podSpec.Containers)
dropDisabledGMSAFieldsFromContainers(podSpec.InitContainers)
}
// dropDisabledGMSAFieldsFromWindowsSecurityOptions removes disabled fields
// related to Windows GMSA from the given WindowsSecurityContextOptions.
func dropDisabledGMSAFieldsFromWindowsSecurityOptions(windowsOptions *api.WindowsSecurityContextOptions) {
if windowsOptions != nil {
windowsOptions.GMSACredentialSpecName = nil
windowsOptions.GMSACredentialSpec = nil
}
}
// dropDisabledGMSAFieldsFromContainers removes disabled fields
func dropDisabledGMSAFieldsFromContainers(containers []api.Container) {
for i := range containers {
if containers[i].SecurityContext != nil {
dropDisabledGMSAFieldsFromWindowsSecurityOptions(containers[i].SecurityContext.WindowsOptions)
}
}
}
// dropDisabledProcMountField removes disabled fields from PodSpec related // dropDisabledProcMountField removes disabled fields from PodSpec related
// to ProcMount only if it is not already used by the old spec // to ProcMount only if it is not already used by the old spec
func dropDisabledProcMountField(podSpec, oldPodSpec *api.PodSpec) { func dropDisabledProcMountField(podSpec, oldPodSpec *api.PodSpec) {
@ -612,6 +647,44 @@ func runAsGroupInUse(podSpec *api.PodSpec) bool {
return false return false
} }
// gMSAFieldsInUse returns true if the pod spec is non-nil and has one of any
// SecurityContext's GMSACredentialSpecName or GMSACredentialSpec fields set.
func gMSAFieldsInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
if podSpec.SecurityContext != nil && gMSAFieldsInUseInWindowsSecurityOptions(podSpec.SecurityContext.WindowsOptions) {
return true
}
return gMSAFieldsInUseInAnyContainer(podSpec.Containers) ||
gMSAFieldsInUseInAnyContainer(podSpec.InitContainers)
}
// gMSAFieldsInUseInWindowsSecurityOptions returns true if the given WindowsSecurityContextOptions is
// non-nil and one of its GMSACredentialSpecName or GMSACredentialSpec fields is set.
func gMSAFieldsInUseInWindowsSecurityOptions(windowsOptions *api.WindowsSecurityContextOptions) bool {
if windowsOptions == nil {
return false
}
return windowsOptions.GMSACredentialSpecName != nil ||
windowsOptions.GMSACredentialSpec != nil
}
// gMSAFieldsInUseInAnyContainer returns true if any of the given Containers has its
// SecurityContext's GMSACredentialSpecName or GMSACredentialSpec fields set.
func gMSAFieldsInUseInAnyContainer(containers []api.Container) bool {
for _, container := range containers {
if container.SecurityContext != nil && gMSAFieldsInUseInWindowsSecurityOptions(container.SecurityContext.WindowsOptions) {
return true
}
}
return false
}
// subpathExprInUse returns true if the pod spec is non-nil and has a volume mount that makes use of the subPathExpr feature // subpathExprInUse returns true if the pod spec is non-nil and has a volume mount that makes use of the subPathExpr feature
func subpathExprInUse(podSpec *api.PodSpec) bool { func subpathExprInUse(podSpec *api.PodSpec) bool {
if podSpec == nil { if podSpec == nil {
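
The guard used throughout these helpers follows the usual feature-gate pattern: strip the alpha field from the incoming object unless the gate is enabled or the old object already used it, so updates never silently lose previously persisted data. A small self-contained sketch of that pattern, with illustrative types rather than the real api.PodSpec:

package main

import "fmt"

// spec stands in for api.PodSpec in this illustration; only the field of
// interest is modeled.
type spec struct {
    GMSACredentialSpecName *string
}

// dropDisabledField clears the alpha field from newSpec unless the feature
// gate is on or the old object already set it.
func dropDisabledField(newSpec, oldSpec *spec, gateEnabled bool) {
    if gateEnabled || (oldSpec != nil && oldSpec.GMSACredentialSpecName != nil) {
        return
    }
    newSpec.GMSACredentialSpecName = nil
}

func main() {
    name := "demo-cred-spec"
    updated := &spec{GMSACredentialSpecName: &name}

    dropDisabledField(updated, nil, false) // create with gate off: field is stripped
    fmt.Println(updated.GMSACredentialSpecName == nil)
}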

View File

@ -1359,6 +1359,208 @@ func TestDropRunAsGroup(t *testing.T) {
} }
} }
func TestDropGMSAFields(t *testing.T) {
defaultContainerSecurityContextFactory := func() *api.SecurityContext {
defaultProcMount := api.DefaultProcMount
return &api.SecurityContext{ProcMount: &defaultProcMount}
}
podWithoutWindowsOptionsFactory := func() *api.Pod {
return &api.Pod{
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyNever,
SecurityContext: &api.PodSecurityContext{},
Containers: []api.Container{{Name: "container1", Image: "testimage", SecurityContext: defaultContainerSecurityContextFactory()}},
InitContainers: []api.Container{{Name: "initContainer1", Image: "testimage", SecurityContext: defaultContainerSecurityContextFactory()}},
},
}
}
type podFactoryInfo struct {
description string
hasGMSAField bool
// this factory should generate the input pod whose spec will be fed to dropDisabledFields
podFactory func() *api.Pod
// this factory should generate the expected pod after the GMSA fields have been dropped
// we can't just use podWithoutWindowsOptionsFactory as is for this, since in some cases
// we'll be left with a WindowsSecurityContextOptions struct with no GMSA field set, as opposed
// to a nil pointer in the pod generated by podWithoutWindowsOptionsFactory
// if this field is not set, it will default to the podFactory
strippedPodFactory func() *api.Pod
}
podFactoryInfos := []podFactoryInfo{
{
description: "does not have any GMSA field set",
hasGMSAField: false,
podFactory: podWithoutWindowsOptionsFactory,
},
{
description: "has a pod-level WindowsSecurityContextOptions struct with no GMSA field set",
hasGMSAField: false,
podFactory: func() *api.Pod {
pod := podWithoutWindowsOptionsFactory()
pod.Spec.SecurityContext.WindowsOptions = &api.WindowsSecurityContextOptions{}
return pod
},
},
{
description: "has a WindowsSecurityContextOptions struct with no GMSA field set on a container",
hasGMSAField: false,
podFactory: func() *api.Pod {
pod := podWithoutWindowsOptionsFactory()
pod.Spec.Containers[0].SecurityContext.WindowsOptions = &api.WindowsSecurityContextOptions{}
return pod
},
},
{
description: "has a WindowsSecurityContextOptions struct with no GMSA field set on an init container",
hasGMSAField: false,
podFactory: func() *api.Pod {
pod := podWithoutWindowsOptionsFactory()
pod.Spec.InitContainers[0].SecurityContext.WindowsOptions = &api.WindowsSecurityContextOptions{}
return pod
},
},
{
description: "is nil",
hasGMSAField: false,
podFactory: func() *api.Pod { return nil },
},
}
toPtr := func(s string) *string {
return &s
}
addGMSACredentialSpecName := func(windowsOptions *api.WindowsSecurityContextOptions) {
windowsOptions.GMSACredentialSpecName = toPtr("dummy-gmsa-cred-spec-name")
}
addGMSACredentialSpec := func(windowsOptions *api.WindowsSecurityContextOptions) {
windowsOptions.GMSACredentialSpec = toPtr("dummy-gmsa-cred-spec-contents")
}
addBothGMSAFields := func(windowsOptions *api.WindowsSecurityContextOptions) {
addGMSACredentialSpecName(windowsOptions)
addGMSACredentialSpec(windowsOptions)
}
for fieldName, windowsOptionsTransformingFunc := range map[string]func(*api.WindowsSecurityContextOptions){
"GMSACredentialSpecName field": addGMSACredentialSpecName,
"GMSACredentialSpec field": addGMSACredentialSpec,
"both GMSA fields": addBothGMSAFields,
} {
// yes, these variables are indeed needed for the closure to work
// properly, please do NOT remove them
name := fieldName
transformingFunc := windowsOptionsTransformingFunc
windowsOptionsWithGMSAFieldFactory := func() *api.WindowsSecurityContextOptions {
windowsOptions := &api.WindowsSecurityContextOptions{}
transformingFunc(windowsOptions)
return windowsOptions
}
podFactoryInfos = append(podFactoryInfos,
podFactoryInfo{
description: fmt.Sprintf("has %s in Pod", name),
hasGMSAField: true,
podFactory: func() *api.Pod {
pod := podWithoutWindowsOptionsFactory()
pod.Spec.SecurityContext.WindowsOptions = windowsOptionsWithGMSAFieldFactory()
return pod
},
strippedPodFactory: func() *api.Pod {
pod := podWithoutWindowsOptionsFactory()
pod.Spec.SecurityContext.WindowsOptions = &api.WindowsSecurityContextOptions{}
return pod
},
},
podFactoryInfo{
description: fmt.Sprintf("has %s in Container", name),
hasGMSAField: true,
podFactory: func() *api.Pod {
pod := podWithoutWindowsOptionsFactory()
pod.Spec.Containers[0].SecurityContext.WindowsOptions = windowsOptionsWithGMSAFieldFactory()
return pod
},
strippedPodFactory: func() *api.Pod {
pod := podWithoutWindowsOptionsFactory()
pod.Spec.Containers[0].SecurityContext.WindowsOptions = &api.WindowsSecurityContextOptions{}
return pod
},
},
podFactoryInfo{
description: fmt.Sprintf("has %s in InitContainer", name),
hasGMSAField: true,
podFactory: func() *api.Pod {
pod := podWithoutWindowsOptionsFactory()
pod.Spec.InitContainers[0].SecurityContext.WindowsOptions = windowsOptionsWithGMSAFieldFactory()
return pod
},
strippedPodFactory: func() *api.Pod {
pod := podWithoutWindowsOptionsFactory()
pod.Spec.InitContainers[0].SecurityContext.WindowsOptions = &api.WindowsSecurityContextOptions{}
return pod
},
})
}
for _, enabled := range []bool{true, false} {
for _, oldPodFactoryInfo := range podFactoryInfos {
for _, newPodFactoryInfo := range podFactoryInfos {
newPodHasGMSAField, newPod := newPodFactoryInfo.hasGMSAField, newPodFactoryInfo.podFactory()
if newPod == nil {
continue
}
oldPodHasGMSAField, oldPod := oldPodFactoryInfo.hasGMSAField, oldPodFactoryInfo.podFactory()
t.Run(fmt.Sprintf("feature enabled=%v, old pod %s, new pod %s", enabled, oldPodFactoryInfo.description, newPodFactoryInfo.description), func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WindowsGMSA, enabled)()
var oldPodSpec *api.PodSpec
if oldPod != nil {
oldPodSpec = &oldPod.Spec
}
dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
// old pod should never be changed
if !reflect.DeepEqual(oldPod, oldPodFactoryInfo.podFactory()) {
t.Errorf("old pod changed: %v", diff.ObjectReflectDiff(oldPod, oldPodFactoryInfo.podFactory()))
}
switch {
case enabled || oldPodHasGMSAField:
// new pod should not be changed if the feature is enabled, or if the old pod had any GMSA field set
if !reflect.DeepEqual(newPod, newPodFactoryInfo.podFactory()) {
t.Errorf("new pod changed: %v", diff.ObjectReflectDiff(newPod, newPodFactoryInfo.podFactory()))
}
case newPodHasGMSAField:
// new pod should be changed
if reflect.DeepEqual(newPod, newPodFactoryInfo.podFactory()) {
t.Errorf("%v", oldPod)
t.Errorf("%v", newPod)
t.Errorf("new pod was not changed")
}
// new pod should not have any GMSA field set
var expectedStrippedPod *api.Pod
if newPodFactoryInfo.strippedPodFactory == nil {
expectedStrippedPod = newPodFactoryInfo.podFactory()
} else {
expectedStrippedPod = newPodFactoryInfo.strippedPodFactory()
}
if !reflect.DeepEqual(newPod, expectedStrippedPod) {
t.Errorf("new pod had some GMSA field set: %v", diff.ObjectReflectDiff(newPod, expectedStrippedPod))
}
default:
// new pod should not need to be changed
if !reflect.DeepEqual(newPod, newPodFactoryInfo.podFactory()) {
t.Errorf("new pod changed: %v", diff.ObjectReflectDiff(newPod, newPodFactoryInfo.podFactory()))
}
}
})
}
}
}
}
func TestDropPodSysctls(t *testing.T) { func TestDropPodSysctls(t *testing.T) {
podWithSysctls := func() *api.Pod { podWithSysctls := func() *api.Pod {
return &api.Pod{ return &api.Pod{

View File

@ -4739,7 +4739,17 @@ type SELinuxOptions struct {
// WindowsSecurityContextOptions contain Windows-specific options and credentials. // WindowsSecurityContextOptions contain Windows-specific options and credentials.
type WindowsSecurityContextOptions struct { type WindowsSecurityContextOptions struct {
// intentionally left empty for now // GMSACredentialSpecName is the name of the GMSA credential spec to use.
// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
// +optional
GMSACredentialSpecName *string
// GMSACredentialSpec is where the GMSA admission webhook
// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
// GMSA credential spec named by the GMSACredentialSpecName field.
// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
// +optional
GMSACredentialSpec *string
} }
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
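
For orientation, a minimal sketch of what a client might set once the WindowsGMSA gate is enabled, using the versioned core/v1 types (the conversion functions further down indicate the same two fields exist there); the pod, image, and credential-spec names are made up.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

func main() {
    credSpecName := "webapp-gmsa" // illustrative name of a GMSACredentialSpec custom resource

    pod := &corev1.Pod{
        Spec: corev1.PodSpec{
            SecurityContext: &corev1.PodSecurityContext{
                WindowsOptions: &corev1.WindowsSecurityContextOptions{
                    // Honored only by servers with the WindowsGMSA feature gate enabled;
                    // the admission webhook later inlines the contents into GMSACredentialSpec.
                    GMSACredentialSpecName: &credSpecName,
                },
            },
            Containers: []corev1.Container{{Name: "app", Image: "example/image"}},
        },
    }
    fmt.Println(*pod.Spec.SecurityContext.WindowsOptions.GMSACredentialSpecName)
}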

View File

@ -756,7 +756,7 @@ func TestMatchNodeSelectorTerms(t *testing.T) {
// TestMatchNodeSelectorTermsStateless ensures MatchNodeSelectorTerms() // TestMatchNodeSelectorTermsStateless ensures MatchNodeSelectorTerms()
// is invoked in a "stateless" manner, i.e. nodeSelectorTerms should NOT // is invoked in a "stateless" manner, i.e. nodeSelectorTerms should NOT
// be deeply modifed after invoking // be deeply modified after invoking
func TestMatchNodeSelectorTermsStateless(t *testing.T) { func TestMatchNodeSelectorTermsStateless(t *testing.T) {
type args struct { type args struct {
nodeSelectorTerms []v1.NodeSelectorTerm nodeSelectorTerms []v1.NodeSelectorTerm

View File

@ -7599,6 +7599,8 @@ func Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core
} }
func autoConvert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(in *v1.WindowsSecurityContextOptions, out *core.WindowsSecurityContextOptions, s conversion.Scope) error { func autoConvert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(in *v1.WindowsSecurityContextOptions, out *core.WindowsSecurityContextOptions, s conversion.Scope) error {
out.GMSACredentialSpecName = (*string)(unsafe.Pointer(in.GMSACredentialSpecName))
out.GMSACredentialSpec = (*string)(unsafe.Pointer(in.GMSACredentialSpec))
return nil return nil
} }
@ -7608,6 +7610,8 @@ func Convert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOpti
} }
func autoConvert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(in *core.WindowsSecurityContextOptions, out *v1.WindowsSecurityContextOptions, s conversion.Scope) error { func autoConvert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(in *core.WindowsSecurityContextOptions, out *v1.WindowsSecurityContextOptions, s conversion.Scope) error {
out.GMSACredentialSpecName = (*string)(unsafe.Pointer(in.GMSACredentialSpecName))
out.GMSACredentialSpec = (*string)(unsafe.Pointer(in.GMSACredentialSpec))
return nil return nil
} }

View File

@ -3446,6 +3446,8 @@ func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *
if len(securityContext.Sysctls) != 0 { if len(securityContext.Sysctls) != 0 {
allErrs = append(allErrs, validateSysctls(securityContext.Sysctls, fldPath.Child("sysctls"))...) allErrs = append(allErrs, validateSysctls(securityContext.Sysctls, fldPath.Child("sysctls"))...)
} }
allErrs = append(allErrs, validateWindowsSecurityContextOptions(securityContext.WindowsOptions, fldPath.Child("windowsOptions"))...)
} }
return allErrs return allErrs
@ -5156,7 +5158,7 @@ func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *core.Endpoints) field.E
return allErrs return allErrs
} }
// ValidateSecurityContext ensure the security context contains valid settings // ValidateSecurityContext ensures the security context contains valid settings
func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) field.ErrorList { func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{} allErrs := field.ErrorList{}
//this should only be true for testing since SecurityContext is defaulted by the core //this should only be true for testing since SecurityContext is defaulted by the core
@ -5202,6 +5204,42 @@ func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) fiel
} }
} }
allErrs = append(allErrs, validateWindowsSecurityContextOptions(sc.WindowsOptions, fldPath.Child("windowsOptions"))...)
return allErrs
}
// maxGMSACredentialSpecLength is the max length, in bytes, for the actual contents
// of a GMSA cred spec. In general, those shouldn't be more than a few hundred bytes,
// so we want to give plenty of room here while still providing an upper bound.
const (
maxGMSACredentialSpecLengthInKiB = 64
maxGMSACredentialSpecLength = maxGMSACredentialSpecLengthInKiB * 1024
)
func validateWindowsSecurityContextOptions(windowsOptions *core.WindowsSecurityContextOptions, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if windowsOptions == nil {
return allErrs
}
if windowsOptions.GMSACredentialSpecName != nil {
// gmsaCredentialSpecName must be the name of a custom resource
for _, msg := range validation.IsDNS1123Subdomain(*windowsOptions.GMSACredentialSpecName) {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("gmsaCredentialSpecName"), windowsOptions.GMSACredentialSpecName, msg))
}
}
if windowsOptions.GMSACredentialSpec != nil {
if l := len(*windowsOptions.GMSACredentialSpec); l == 0 {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("gmsaCredentialSpec"), windowsOptions.GMSACredentialSpec, "gmsaCredentialSpec cannot be an empty string"))
} else if l > maxGMSACredentialSpecLength {
errMsg := fmt.Sprintf("gmsaCredentialSpec size must be under %d KiB", maxGMSACredentialSpecLengthInKiB)
allErrs = append(allErrs, field.Invalid(fieldPath.Child("gmsaCredentialSpec"), windowsOptions.GMSACredentialSpec, errMsg))
}
}
return allErrs
}
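As a rough illustration of the size bound enforced above (a standalone sketch that mirrors the check rather than calling the unexported validator), the 64 KiB limit works out as follows:

```go
package main

import (
	"fmt"
	"strings"
)

// Mirrors the constants used above: 64 KiB expressed in bytes.
const maxGMSACredentialSpecLengthInKiB = 64
const maxGMSACredentialSpecLength = maxGMSACredentialSpecLengthInKiB * 1024

func main() {
	// A credential spec one byte over the limit should be rejected.
	spec := strings.Repeat("a", maxGMSACredentialSpecLength+1)
	if l := len(spec); l > maxGMSACredentialSpecLength {
		fmt.Printf("gmsaCredentialSpec size must be under %d KiB (got %d bytes)\n",
			maxGMSACredentialSpecLengthInKiB, l)
	}
}
```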


@ -13205,3 +13205,77 @@ func TestValidateOrSetClientIPAffinityConfig(t *testing.T) {
}
}
}
func TestValidateWindowsSecurityContextOptions(t *testing.T) {
toPtr := func(s string) *string {
return &s
}
testCases := []struct {
testName string
windowsOptions *core.WindowsSecurityContextOptions
expectedErrorSubstring string
}{
{
testName: "a nil pointer",
},
{
testName: "an empty struct",
windowsOptions: &core.WindowsSecurityContextOptions{},
},
{
testName: "a valid input",
windowsOptions: &core.WindowsSecurityContextOptions{
GMSACredentialSpecName: toPtr("dummy-gmsa-crep-spec-name"),
GMSACredentialSpec: toPtr("dummy-gmsa-crep-spec-contents"),
},
},
{
testName: "a GMSA cred spec name that is not a valid resource name",
windowsOptions: &core.WindowsSecurityContextOptions{
// invalid because of the underscore
GMSACredentialSpecName: toPtr("not_a-valid-gmsa-crep-spec-name"),
},
expectedErrorSubstring: dnsSubdomainLabelErrMsg,
},
{
testName: "empty GMSA cred spec contents",
windowsOptions: &core.WindowsSecurityContextOptions{
GMSACredentialSpec: toPtr(""),
},
expectedErrorSubstring: "gmsaCredentialSpec cannot be an empty string",
},
{
testName: "GMSA cred spec contents that are too long",
windowsOptions: &core.WindowsSecurityContextOptions{
GMSACredentialSpec: toPtr(strings.Repeat("a", maxGMSACredentialSpecLength+1)),
},
expectedErrorSubstring: "gmsaCredentialSpec size must be under",
},
}
for _, testCase := range testCases {
t.Run("validateWindowsSecurityContextOptions with"+testCase.testName, func(t *testing.T) {
errs := validateWindowsSecurityContextOptions(testCase.windowsOptions, field.NewPath("field"))
switch len(errs) {
case 0:
if testCase.expectedErrorSubstring != "" {
t.Errorf("expected a failure containing the substring: %q", testCase.expectedErrorSubstring)
}
case 1:
if testCase.expectedErrorSubstring == "" {
t.Errorf("didn't expect a failure, got: %q", errs[0].Error())
} else if !strings.Contains(errs[0].Error(), testCase.expectedErrorSubstring) {
t.Errorf("expected a failure with the substring %q, got %q instead", testCase.expectedErrorSubstring, errs[0].Error())
}
default:
t.Errorf("got %d failures", len(errs))
for i, err := range errs {
t.Errorf("error %d: %q", i, err.Error())
}
}
})
}
}


@ -3464,7 +3464,7 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
if in.WindowsOptions != nil {
in, out := &in.WindowsOptions, &out.WindowsOptions
*out = new(WindowsSecurityContextOptions)
**out = **in
(*in).DeepCopyInto(*out)
}
if in.RunAsUser != nil {
in, out := &in.RunAsUser, &out.RunAsUser
@ -4646,7 +4646,7 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) {
if in.WindowsOptions != nil {
in, out := &in.WindowsOptions, &out.WindowsOptions
*out = new(WindowsSecurityContextOptions)
**out = **in
(*in).DeepCopyInto(*out)
}
if in.RunAsUser != nil {
in, out := &in.RunAsUser, &out.RunAsUser
@ -5475,6 +5475,16 @@ func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) {
*out = *in
if in.GMSACredentialSpecName != nil {
in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName
*out = new(string)
**out = **in
}
if in.GMSACredentialSpec != nil {
in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec
*out = new(string)
**out = **in
}
return
}
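A minimal sketch of why the generated deep copy now copies the pointer fields individually; the `options` type below is a stand-in for illustration only, not the real core API type:

```go
package main

import "fmt"

// Stand-in for WindowsSecurityContextOptions, mirroring the generated pattern above.
type options struct {
	GMSACredentialSpecName *string
}

func (in *options) DeepCopyInto(out *options) {
	*out = *in
	if in.GMSACredentialSpecName != nil {
		in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName
		*out = new(string)
		**out = **in
	}
}

func main() {
	name := "spec-a"
	orig := options{GMSACredentialSpecName: &name}
	var copied options
	orig.DeepCopyInto(&copied)

	// Because the pointer was deep-copied, mutating the copy leaves the original intact.
	*copied.GMSACredentialSpecName = "spec-b"
	fmt.Println(*orig.GMSACredentialSpecName, *copied.GMSACredentialSpecName) // spec-a spec-b
}
```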


@ -1,10 +1,6 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@ -15,12 +11,6 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/capabilities",
)
go_test(
name = "go_default_test",
srcs = ["capabilities_test.go"],
embed = [":go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
@ -33,3 +23,9 @@ filegroup(
srcs = [":package-srcs"], srcs = [":package-srcs"],
tags = ["automanaged"], tags = ["automanaged"],
) )
go_test(
name = "go_default_test",
srcs = ["capabilities_test.go"],
embed = [":go_default_library"],
)


@ -61,10 +61,9 @@ func Initialize(c Capabilities) {
}
// Setup the capability set. It wraps Initialize for improving usability.
func Setup(allowPrivileged bool, privilegedSources PrivilegedSources, perConnectionBytesPerSec int64) {
func Setup(allowPrivileged bool, perConnectionBytesPerSec int64) {
Initialize(Capabilities{
AllowPrivileged: allowPrivileged,
PrivilegedSources: privilegedSources,
PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec,
})
}
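A minimal sketch of the updated call site (assuming a caller wires this up the same way the kubelet does; the concrete argument values are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/capabilities"
)

func main() {
	// With PrivilegedSources dropped from the signature, callers now pass only the
	// privileged flag and the per-connection bandwidth limit.
	capabilities.Setup(true, 0)
	fmt.Println(capabilities.Get().AllowPrivileged)
}
```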


@ -17,18 +17,10 @@ limitations under the License.
package leaderelectionconfig
import (
"time"
"github.com/spf13/pflag" "github.com/spf13/pflag"
componentbaseconfig "k8s.io/component-base/config" componentbaseconfig "k8s.io/component-base/config"
) )
const (
// DefaultLeaseDuration defines a default duration of lease.
// TODO: This constant should move to the k8s.io/component-base/config package
DefaultLeaseDuration = 15 * time.Second
)
// BindFlags binds the LeaderElectionConfiguration struct fields to a flagset
func BindFlags(l *componentbaseconfig.LeaderElectionConfiguration, fs *pflag.FlagSet) {
fs.BoolVar(&l.LeaderElect, "leader-elect", l.LeaderElect, ""+
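A hedged sketch of how BindFlags is typically consumed; the import path for the package is assumed from the package name shown above, and the flag value is illustrative:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
	componentbaseconfig "k8s.io/component-base/config"
	// Import path assumed from the package name above.
	"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
)

func main() {
	fs := pflag.NewFlagSet("controller", pflag.ExitOnError)
	cfg := componentbaseconfig.LeaderElectionConfiguration{LeaderElect: true}

	// Register --leader-elect and the related leader-election flags on the flagset.
	leaderelectionconfig.BindFlags(&cfg, fs)

	_ = fs.Parse([]string{"--leader-elect=false"})
	fmt.Println(cfg.LeaderElect) // false
}
```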


@ -111,12 +111,12 @@ func updateRoutes(network *gophercloud.ServiceClient, router *routers.Router, ne
}
unwinder := func() {
klog.V(4).Info("Reverting routes change to router ", router.ID)
klog.V(4).Infof("Reverting routes change to router %v", router.ID)
_, err := routers.Update(network, router.ID, routers.UpdateOpts{
Routes: origRoutes,
}).Extract()
if err != nil {
klog.Warning("Unable to reset routes during error unwind: ", err)
klog.Warningf("Unable to reset routes during error unwind: %v", err)
}
}
@ -134,12 +134,12 @@ func updateAllowedAddressPairs(network *gophercloud.ServiceClient, port *neutron
}
unwinder := func() {
klog.V(4).Info("Reverting allowed-address-pairs change to port ", port.ID)
klog.V(4).Infof("Reverting allowed-address-pairs change to port %v", port.ID)
_, err := neutronports.Update(network, port.ID, neutronports.UpdateOpts{
AllowedAddressPairs: &origPairs,
}).Extract()
if err != nil {
klog.Warning("Unable to reset allowed-address-pairs during error unwind: ", err)
klog.Warningf("Unable to reset allowed-address-pairs during error unwind: %v", err)
}
}
@ -200,7 +200,7 @@ func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint s
found := false
for _, item := range port.AllowedAddressPairs {
if item.IPAddress == route.DestinationCIDR {
klog.V(4).Info("Found existing allowed-address-pair: ", item)
klog.V(4).Infof("Found existing allowed-address-pair: %v", item)
found = true
break
}


@ -19,7 +19,9 @@ go_library(
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
@ -34,6 +36,7 @@ go_library(
"//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
@ -49,13 +52,20 @@ go_test(
"//pkg/apis/core/install:go_default_library", "//pkg/apis/core/install:go_default_library",
"//pkg/controller:go_default_library", "//pkg/controller:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/scale/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",


@ -26,7 +26,9 @@ import (
policy "k8s.io/api/policy/v1beta1" policy "k8s.io/api/policy/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality" apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@ -41,6 +43,7 @@ import (
appsv1listers "k8s.io/client-go/listers/apps/v1" appsv1listers "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1" corelisters "k8s.io/client-go/listers/core/v1"
policylisters "k8s.io/client-go/listers/policy/v1beta1" policylisters "k8s.io/client-go/listers/policy/v1beta1"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
@ -67,6 +70,9 @@ type updater func(*policy.PodDisruptionBudget) error
type DisruptionController struct {
kubeClient clientset.Interface
mapper apimeta.RESTMapper
scaleNamespacer scaleclient.ScalesGetter
pdbLister policylisters.PodDisruptionBudgetLister
pdbListerSynced cache.InformerSynced
@ -105,7 +111,7 @@ type controllerAndScale struct {
// podControllerFinder is a function type that maps a pod to a list of
// controllers and their scale.
type podControllerFinder func(*v1.Pod) (*controllerAndScale, error)
type podControllerFinder func(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error)
func NewDisruptionController(
podInformer coreinformers.PodInformer,
@ -115,6 +121,8 @@ func NewDisruptionController(
dInformer appsv1informers.DeploymentInformer,
ssInformer appsv1informers.StatefulSetInformer,
kubeClient clientset.Interface,
restMapper apimeta.RESTMapper,
scaleNamespacer scaleclient.ScalesGetter,
) *DisruptionController {
dc := &DisruptionController{
kubeClient: kubeClient,
@ -157,19 +165,19 @@ func NewDisruptionController(
dc.ssLister = ssInformer.Lister()
dc.ssListerSynced = ssInformer.Informer().HasSynced
dc.mapper = restMapper
dc.scaleNamespacer = scaleNamespacer
return dc
}
// TODO(mml): When controllerRef is implemented (#2210), we *could* simply
// return controllers without their scales, and access scale type-generically
// via the scale subresource. That may not be as much of a win as it sounds,
// however. We are accessing everything through the pkg/client/cache API that
// we have to set up and tune to the types we know we'll be accessing anyway,
// and we may well need further tweaks just to be able to access scale
// subresources.
// The workload resources do implement the scale subresource, so it would
// be possible to only check the scale subresource here. But since there is no
// way to take advantage of listers with scale subresources, we use the workload
// resources directly and only fall back to the scale subresource when needed.
func (dc *DisruptionController) finders() []podControllerFinder {
return []podControllerFinder{dc.getPodReplicationController, dc.getPodDeployment, dc.getPodReplicaSet,
dc.getPodStatefulSet}
dc.getPodStatefulSet, dc.getScaleController}
}
var (
@ -180,15 +188,12 @@
)
// getPodReplicaSet finds a replicaset which has no matching deployments.
func (dc *DisruptionController) getPodReplicaSet(pod *v1.Pod) (*controllerAndScale, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
return nil, nil
}
if controllerRef.Kind != controllerKindRS.Kind {
return nil, nil
}
rs, err := dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
func (dc *DisruptionController) getPodReplicaSet(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
ok, err := verifyGroupKind(controllerRef, controllerKindRS.Kind, []string{"apps", "extensions"})
if !ok || err != nil {
return nil, err
}
rs, err := dc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
@ -204,16 +209,13 @@ func (dc *DisruptionController) getPodReplicaSet(pod *v1.Pod) (*controllerAndSca
return &controllerAndScale{rs.UID, *(rs.Spec.Replicas)}, nil
}
// getPodStatefulSet returns the statefulset managing the given pod.
func (dc *DisruptionController) getPodStatefulSet(pod *v1.Pod) (*controllerAndScale, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
return nil, nil
}
if controllerRef.Kind != controllerKindSS.Kind {
return nil, nil
}
ss, err := dc.ssLister.StatefulSets(pod.Namespace).Get(controllerRef.Name)
// getPodStatefulSet returns the statefulset referenced by the provided controllerRef.
func (dc *DisruptionController) getPodStatefulSet(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
ok, err := verifyGroupKind(controllerRef, controllerKindSS.Kind, []string{"apps"})
if !ok || err != nil {
return nil, err
}
ss, err := dc.ssLister.StatefulSets(namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
@ -226,15 +228,12 @@ func (dc *DisruptionController) getPodStatefulSet(pod *v1.Pod) (*controllerAndSc
}
// getPodDeployments finds deployments for any replicasets which are being managed by deployments.
func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndScale, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
return nil, nil
}
if controllerRef.Kind != controllerKindRS.Kind {
return nil, nil
}
rs, err := dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
func (dc *DisruptionController) getPodDeployment(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
ok, err := verifyGroupKind(controllerRef, controllerKindRS.Kind, []string{"apps", "extensions"})
if !ok || err != nil {
return nil, err
}
rs, err := dc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
@ -246,8 +245,10 @@ func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndSca
if controllerRef == nil {
return nil, nil
}
if controllerRef.Kind != controllerKindDep.Kind {
return nil, nil
ok, err = verifyGroupKind(controllerRef, controllerKindDep.Kind, []string{"apps", "extensions"})
if !ok || err != nil {
return nil, err
}
deployment, err := dc.dLister.Deployments(rs.Namespace).Get(controllerRef.Name)
if err != nil {
@ -260,15 +261,12 @@ func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndSca
return &controllerAndScale{deployment.UID, *(deployment.Spec.Replicas)}, nil
}
func (dc *DisruptionController) getPodReplicationController(pod *v1.Pod) (*controllerAndScale, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
return nil, nil
}
if controllerRef.Kind != controllerKindRC.Kind {
return nil, nil
}
rc, err := dc.rcLister.ReplicationControllers(pod.Namespace).Get(controllerRef.Name)
func (dc *DisruptionController) getPodReplicationController(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
ok, err := verifyGroupKind(controllerRef, controllerKindRC.Kind, []string{""})
if !ok || err != nil {
return nil, err
}
rc, err := dc.rcLister.ReplicationControllers(namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
@ -279,6 +277,55 @@ func (dc *DisruptionController) getPodReplicationController(pod *v1.Pod) (*contr
return &controllerAndScale{rc.UID, *(rc.Spec.Replicas)}, nil
}
func (dc *DisruptionController) getScaleController(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
gv, err := schema.ParseGroupVersion(controllerRef.APIVersion)
if err != nil {
return nil, err
}
gk := schema.GroupKind{
Group: gv.Group,
Kind: controllerRef.Kind,
}
mapping, err := dc.mapper.RESTMapping(gk, gv.Version)
if err != nil {
return nil, err
}
gr := mapping.Resource.GroupResource()
scale, err := dc.scaleNamespacer.Scales(namespace).Get(gr, controllerRef.Name)
if err != nil {
if errors.IsNotFound(err) {
return nil, nil
}
return nil, err
}
if scale.UID != controllerRef.UID {
return nil, nil
}
return &controllerAndScale{scale.UID, scale.Spec.Replicas}, nil
}
func verifyGroupKind(controllerRef *metav1.OwnerReference, expectedKind string, expectedGroups []string) (bool, error) {
gv, err := schema.ParseGroupVersion(controllerRef.APIVersion)
if err != nil {
return false, err
}
if controllerRef.Kind != expectedKind {
return false, nil
}
for _, group := range expectedGroups {
if group == gv.Group {
return true, nil
}
}
return false, nil
}
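A small standalone sketch of how the new helper behaves for a sample owner reference (the helper is duplicated here only so the snippet can run outside the controller package; the group/kind values are illustrative):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Standalone mirror of verifyGroupKind above, for illustration only.
func verifyGroupKind(ref *metav1.OwnerReference, expectedKind string, expectedGroups []string) (bool, error) {
	gv, err := schema.ParseGroupVersion(ref.APIVersion)
	if err != nil {
		return false, err
	}
	if ref.Kind != expectedKind {
		return false, nil
	}
	for _, group := range expectedGroups {
		if group == gv.Group {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ref := &metav1.OwnerReference{APIVersion: "apps/v1", Kind: "ReplicaSet", Name: "rs-1"}
	ok, err := verifyGroupKind(ref, "ReplicaSet", []string{"apps", "extensions"})
	fmt.Println(ok, err) // true <nil>

	// An owner in an unexpected group is not matched here, so the controller
	// falls through to the generic scale-subresource finder instead.
	ref.APIVersion = "custom.k8s.io/v1"
	ok, _ = verifyGroupKind(ref, "ReplicaSet", []string{"apps", "extensions"})
	fmt.Println(ok) // false
}
```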
func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dc.queue.ShutDown()
@ -583,10 +630,23 @@ func (dc *DisruptionController) getExpectedScale(pdb *policy.PodDisruptionBudget
// 1. Find the controller for each pod. If any pod has 0 controllers,
// that's an error. With ControllerRef, a pod can only have 1 controller.
for _, pod := range pods {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
err = fmt.Errorf("found no controller ref for pod %q", pod.Name)
dc.recorder.Event(pdb, v1.EventTypeWarning, "NoControllerRef", err.Error())
return
}
// If we already know the scale of the controller there is no need to do anything.
if _, found := controllerScale[controllerRef.UID]; found {
continue
}
// Check all the supported controllers to find the desired scale.
foundController := false
for _, finder := range dc.finders() {
var controllerNScale *controllerAndScale
controllerNScale, err = finder(pod)
controllerNScale, err = finder(controllerRef, pod.Namespace)
if err != nil {
return
}


@ -23,13 +23,20 @@ import (
"time" "time"
apps "k8s.io/api/apps/v1" apps "k8s.io/api/apps/v1"
autoscalingapi "k8s.io/api/autoscaling/v1"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1" policy "k8s.io/api/policy/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality" apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
scalefake "k8s.io/client-go/scale/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
_ "k8s.io/kubernetes/pkg/apis/core/install" _ "k8s.io/kubernetes/pkg/apis/core/install"
@ -90,6 +97,14 @@ type disruptionController struct {
rsStore cache.Store
dStore cache.Store
ssStore cache.Store
scaleClient *scalefake.FakeScaleClient
}
var customGVK = schema.GroupVersionKind{
Group: "custom.k8s.io",
Version: "v1",
Kind: "customresource",
}
func newFakeDisruptionController() (*disruptionController, *pdbStates) {
@ -97,6 +112,10 @@ func newFakeDisruptionController() (*disruptionController, *pdbStates) {
informerFactory := informers.NewSharedInformerFactory(nil, controller.NoResyncPeriodFunc())
scheme := runtime.NewScheme()
scheme.AddKnownTypeWithName(customGVK, &v1.Service{})
fakeScaleClient := &scalefake.FakeScaleClient{}
dc := NewDisruptionController(
informerFactory.Core().V1().Pods(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
@ -105,6 +124,8 @@ func newFakeDisruptionController() (*disruptionController, *pdbStates) {
informerFactory.Apps().V1().Deployments(),
informerFactory.Apps().V1().StatefulSets(),
nil,
testrestmapper.TestOnlyStaticRESTMapper(scheme),
fakeScaleClient,
)
dc.getUpdater = func() updater { return ps.Set }
dc.podListerSynced = alwaysReady
@ -122,6 +143,7 @@ func newFakeDisruptionController() (*disruptionController, *pdbStates) {
informerFactory.Apps().V1().ReplicaSets().Informer().GetStore(),
informerFactory.Apps().V1().Deployments().Informer().GetStore(),
informerFactory.Apps().V1().StatefulSets().Informer().GetStore(),
fakeScaleClient,
}, ps
}
@ -490,6 +512,52 @@ func TestReplicaSet(t *testing.T) {
ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10, map[string]metav1.Time{})
}
func TestScaleResource(t *testing.T) {
customResourceUID := uuid.NewUUID()
replicas := int32(10)
pods := int32(4)
maxUnavailable := int32(5)
dc, ps := newFakeDisruptionController()
dc.scaleClient.AddReactor("get", "customresources", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &autoscalingapi.Scale{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
UID: customResourceUID,
},
Spec: autoscalingapi.ScaleSpec{
Replicas: replicas,
},
}
return true, obj, nil
})
pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(int(maxUnavailable)))
add(t, dc.pdbStore, pdb)
trueVal := true
for i := 0; i < int(pods); i++ {
pod, _ := newPod(t, fmt.Sprintf("pod-%d", i))
pod.SetOwnerReferences([]metav1.OwnerReference{
{
Kind: customGVK.Kind,
APIVersion: customGVK.GroupVersion().String(),
Controller: &trueVal,
UID: customResourceUID,
},
})
add(t, dc.podStore, pod)
}
dc.sync(pdbName)
disruptionsAllowed := int32(0)
if replicas-pods < maxUnavailable {
disruptionsAllowed = maxUnavailable - (replicas - pods)
}
ps.VerifyPdbStatus(t, pdbName, disruptionsAllowed, pods, replicas-maxUnavailable, replicas, map[string]metav1.Time{})
}
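Worked through with the constants used above: replicas(10) - pods(4) = 6, which is not below maxUnavailable(5), so disruptionsAllowed stays 0 and the verified PDB status is (0 allowed disruptions, 4 currently healthy, 5 desired healthy, 10 expected pods).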
// Verify that multiple controllers doesn't allow the PDB to be set true.
func TestMultipleControllers(t *testing.T) {
const podCount = 2
@ -759,3 +827,202 @@ func TestUpdateDisruptedPods(t *testing.T) {
ps.VerifyPdbStatus(t, pdbName, 0, 1, 1, 3, map[string]metav1.Time{"p3": {Time: currentTime}})
}
func TestBasicFinderFunctions(t *testing.T) {
dc, _ := newFakeDisruptionController()
rs, _ := newReplicaSet(t, 10)
add(t, dc.rsStore, rs)
rc, _ := newReplicationController(t, 12)
add(t, dc.rcStore, rc)
ss, _ := newStatefulSet(t, 14)
add(t, dc.ssStore, ss)
testCases := map[string]struct {
finderFunc podControllerFinder
apiVersion string
kind string
name string
uid types.UID
findsScale bool
expectedScale int32
}{
"replicaset controller with apps group": {
finderFunc: dc.getPodReplicaSet,
apiVersion: "apps/v1",
kind: controllerKindRS.Kind,
name: rs.Name,
uid: rs.UID,
findsScale: true,
expectedScale: 10,
},
"replicaset controller with invalid group": {
finderFunc: dc.getPodReplicaSet,
apiVersion: "invalid/v1",
kind: controllerKindRS.Kind,
name: rs.Name,
uid: rs.UID,
findsScale: false,
},
"replicationcontroller with empty group": {
finderFunc: dc.getPodReplicationController,
apiVersion: "/v1",
kind: controllerKindRC.Kind,
name: rc.Name,
uid: rc.UID,
findsScale: true,
expectedScale: 12,
},
"replicationcontroller with invalid group": {
finderFunc: dc.getPodReplicationController,
apiVersion: "apps/v1",
kind: controllerKindRC.Kind,
name: rc.Name,
uid: rc.UID,
findsScale: false,
},
"statefulset controller with extensions group": {
finderFunc: dc.getPodStatefulSet,
apiVersion: "apps/v1",
kind: controllerKindSS.Kind,
name: ss.Name,
uid: ss.UID,
findsScale: true,
expectedScale: 14,
},
"statefulset controller with invalid kind": {
finderFunc: dc.getPodStatefulSet,
apiVersion: "apps/v1",
kind: controllerKindRS.Kind,
name: ss.Name,
uid: ss.UID,
findsScale: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
controllerRef := &metav1.OwnerReference{
APIVersion: tc.apiVersion,
Kind: tc.kind,
Name: tc.name,
UID: tc.uid,
}
controllerAndScale, _ := tc.finderFunc(controllerRef, metav1.NamespaceDefault)
if controllerAndScale == nil {
if tc.findsScale {
t.Error("Expected scale, but got nil")
}
return
}
if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
t.Errorf("Expected scale %d, but got %d", want, got)
}
if got, want := controllerAndScale.UID, tc.uid; got != want {
t.Errorf("Expected uid %s, but got %s", want, got)
}
})
}
}
func TestDeploymentFinderFunction(t *testing.T) {
labels := map[string]string{
"foo": "bar",
}
testCases := map[string]struct {
rsApiVersion string
rsKind string
depApiVersion string
depKind string
findsScale bool
expectedScale int32
}{
"happy path": {
rsApiVersion: "apps/v1",
rsKind: controllerKindRS.Kind,
depApiVersion: "extensions/v1",
depKind: controllerKindDep.Kind,
findsScale: true,
expectedScale: 10,
},
"invalid rs apiVersion": {
rsApiVersion: "invalid/v1",
rsKind: controllerKindRS.Kind,
depApiVersion: "apps/v1",
depKind: controllerKindDep.Kind,
findsScale: false,
},
"invalid rs kind": {
rsApiVersion: "apps/v1",
rsKind: "InvalidKind",
depApiVersion: "apps/v1",
depKind: controllerKindDep.Kind,
findsScale: false,
},
"invalid deployment apiVersion": {
rsApiVersion: "extensions/v1",
rsKind: controllerKindRS.Kind,
depApiVersion: "deployment/v1",
depKind: controllerKindDep.Kind,
findsScale: false,
},
"invalid deployment kind": {
rsApiVersion: "apps/v1",
rsKind: controllerKindRS.Kind,
depApiVersion: "extensions/v1",
depKind: "InvalidKind",
findsScale: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
dc, _ := newFakeDisruptionController()
dep, _ := newDeployment(t, 10)
dep.Spec.Selector = newSel(labels)
add(t, dc.dStore, dep)
rs, _ := newReplicaSet(t, 5)
rs.Labels = labels
trueVal := true
rs.OwnerReferences = append(rs.OwnerReferences, metav1.OwnerReference{
APIVersion: tc.depApiVersion,
Kind: tc.depKind,
Name: dep.Name,
UID: dep.UID,
Controller: &trueVal,
})
add(t, dc.rsStore, rs)
controllerRef := &metav1.OwnerReference{
APIVersion: tc.rsApiVersion,
Kind: tc.rsKind,
Name: rs.Name,
UID: rs.UID,
}
controllerAndScale, _ := dc.getPodDeployment(controllerRef, metav1.NamespaceDefault)
if controllerAndScale == nil {
if tc.findsScale {
t.Error("Expected scale, but got nil")
}
return
}
if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
t.Errorf("Expected scale %d, but got %d", want, got)
}
if got, want := controllerAndScale.UID, dep.UID; got != want {
t.Errorf("Expected uid %s, but got %s", want, got)
}
})
}
}


@ -631,16 +631,15 @@ func pastBackoffLimitOnFailure(job *batch.Job, pods []*v1.Pod) bool {
result := int32(0)
for i := range pods {
po := pods[i]
if po.Status.Phase != v1.PodRunning {
continue
}
for j := range po.Status.InitContainerStatuses {
stat := po.Status.InitContainerStatuses[j]
result += stat.RestartCount
}
for j := range po.Status.ContainerStatuses {
stat := po.Status.ContainerStatuses[j]
result += stat.RestartCount
}
if po.Status.Phase == v1.PodRunning || po.Status.Phase == v1.PodPending {
for j := range po.Status.InitContainerStatuses {
stat := po.Status.InitContainerStatuses[j]
result += stat.RestartCount
}
for j := range po.Status.ContainerStatuses {
stat := po.Status.ContainerStatuses[j]
result += stat.RestartCount
}
}
}
if *job.Spec.BackoffLimit == 0 {
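A standalone sketch of the new accounting: pods that are Running or Pending contribute the restart counts of both their init and regular containers toward the job's backoff limit (the sample pods and limit below are illustrative):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// totalRestarts mirrors the summation above for illustration.
func totalRestarts(pods []*v1.Pod) int32 {
	result := int32(0)
	for _, po := range pods {
		if po.Status.Phase != v1.PodRunning && po.Status.Phase != v1.PodPending {
			continue
		}
		for _, stat := range po.Status.InitContainerStatuses {
			result += stat.RestartCount
		}
		for _, stat := range po.Status.ContainerStatuses {
			result += stat.RestartCount
		}
	}
	return result
}

func main() {
	pods := []*v1.Pod{
		{Status: v1.PodStatus{Phase: v1.PodPending, ContainerStatuses: []v1.ContainerStatus{{RestartCount: 2}}}},
		{Status: v1.PodStatus{Phase: v1.PodFailed, ContainerStatuses: []v1.ContainerStatus{{RestartCount: 7}}}},
	}
	backoffLimit := int32(2)
	// Only the Pending pod counts, so 2 >= 2 and the limit is considered exceeded.
	fmt.Println(totalRestarts(pods) >= backoffLimit) // true
}
```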


@ -1414,6 +1414,7 @@ func TestJobBackoffForOnFailure(t *testing.T) {
// pod setup
jobKeyForget bool
restartCounts []int32
podPhase v1.PodPhase
// expectations
expectedActive int32
@ -1424,32 +1425,47 @@ func TestJobBackoffForOnFailure(t *testing.T) {
}{
"backoffLimit 0 should have 1 pod active": {
1, 1, 0,
true, []int32{0},
true, []int32{0}, v1.PodRunning,
1, 0, 0, nil, "",
},
"backoffLimit 1 with restartCount 0 should have 1 pod active": {
1, 1, 1,
true, []int32{0},
true, []int32{0}, v1.PodRunning,
1, 0, 0, nil, "",
},
"backoffLimit 1 with restartCount 1 should have 0 pod active": {
"backoffLimit 1 with restartCount 1 and podRunning should have 0 pod active": {
1, 1, 1,
true, []int32{1},
true, []int32{1}, v1.PodRunning,
0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
},
"too many job failures - single pod": {
"backoffLimit 1 with restartCount 1 and podPending should have 0 pod active": {
1, 1, 1,
true, []int32{1}, v1.PodPending,
0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
},
"too many job failures with podRunning - single pod": {
1, 5, 2,
true, []int32{2},
true, []int32{2}, v1.PodRunning,
0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
},
"too many job failures - multiple pods": {
"too many job failures with podPending - single pod": {
1, 5, 2,
true, []int32{2}, v1.PodPending,
0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
},
"too many job failures with podRunning - multiple pods": {
2, 5, 2,
true, []int32{1, 1},
true, []int32{1, 1}, v1.PodRunning,
0, 0, 2, &jobConditionFailed, "BackoffLimitExceeded",
},
"too many job failures with podPending - multiple pods": {
2, 5, 2,
true, []int32{1, 1}, v1.PodPending,
0, 0, 2, &jobConditionFailed, "BackoffLimitExceeded",
},
"not enough failures": {
2, 5, 3,
true, []int32{1, 1},
true, []int32{1, 1}, v1.PodRunning,
2, 0, 0, nil, "",
},
}
@ -1474,7 +1490,7 @@ func TestJobBackoffForOnFailure(t *testing.T) {
job.Spec.Template.Spec.RestartPolicy = v1.RestartPolicyOnFailure
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
for i, pod := range newPodList(int32(len(tc.restartCounts)), v1.PodRunning, job) {
for i, pod := range newPodList(int32(len(tc.restartCounts)), tc.podPhase, job) {
pod.Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: tc.restartCounts[i]}}
podIndexer.Add(&pod)
}


@ -862,7 +862,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
transitionTime = savedNodeHealth.readyTransitionTimestamp
}
if klog.V(5) {
klog.V(5).Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, savedNodeHealth.status, node.Status)
klog.Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, savedNodeHealth.status, node.Status)
} else {
klog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp.", node.Name)
}
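A hedged sketch of the logging pattern in this change: once the branch is guarded by `klog.V(5)`, calling the plain `klog.Infof` avoids re-checking the verbosity level inside the branch (the node name and level here are illustrative):

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	flag.Set("v", "5")
	flag.Parse()

	nodeName := "node-1"
	// The guard already established that V(5) is enabled, so the guarded branch
	// logs with Infof directly instead of klog.V(5).Infof.
	if klog.V(5) {
		klog.Infof("Node %s ReadyCondition updated. Updating timestamp.", nodeName)
	} else {
		klog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp.", nodeName)
	}
	klog.Flush()
}
```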


@ -199,8 +199,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
}
createFailures := []error{}
for i := range c.serviceAccountsToEnsure {
sa := c.serviceAccountsToEnsure[i]
for _, sa := range c.serviceAccountsToEnsure {
switch _, err := c.saLister.ServiceAccounts(ns.Name).Get(sa.Name); {
case err == nil:
continue


@ -6,6 +6,7 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/controller/util/node", importpath = "k8s.io/kubernetes/pkg/controller/util/node",
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library", "//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library", "//pkg/controller:go_default_library",
"//pkg/kubelet/util/format:go_default_library", "//pkg/kubelet/util/format:go_default_library",


@ -32,6 +32,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
appsv1listers "k8s.io/client-go/listers/apps/v1" appsv1listers "k8s.io/client-go/listers/apps/v1"
utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/util/format"
@ -134,9 +135,12 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
continue
}
for i, cond := range pod.Status.Conditions {
for _, cond := range pod.Status.Conditions {
if cond.Type == v1.PodReady {
pod.Status.Conditions[i].Status = v1.ConditionFalse
cond.Status = v1.ConditionFalse
if !utilpod.UpdatePodCondition(&pod.Status, &cond) {
break
}
klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(&pod)
if err != nil {
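A minimal sketch of why `UpdatePodCondition` is used as the gate above: it returns true only when the condition actually changed, so an already-not-ready pod causes the loop to break without issuing another status update (the pod status below is illustrative):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
)

func main() {
	status := v1.PodStatus{
		Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
	}

	// First call flips Ready to False and reports a change.
	changed := utilpod.UpdatePodCondition(&status, &v1.PodCondition{
		Type:   v1.PodReady,
		Status: v1.ConditionFalse,
	})
	fmt.Println(changed, status.Conditions[0].Status) // true False

	// Second identical call reports no change, which is the case where the
	// controller loop above breaks instead of calling UpdateStatus again.
	changed = utilpod.UpdatePodCondition(&status, &v1.PodCondition{
		Type:   v1.PodReady,
		Status: v1.ConditionFalse,
	})
	fmt.Println(changed) // false
}
```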


@ -532,7 +532,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: featuregate.Beta},
apiextensionsfeatures.CustomResourceSubresources: {Default: true, PreRelease: featuregate.Beta},
apiextensionsfeatures.CustomResourceWebhookConversion: {Default: false, PreRelease: featuregate.Alpha},
apiextensionsfeatures.CustomResourcePublishOpenAPI: {Default: false, PreRelease: featuregate.Alpha},
apiextensionsfeatures.CustomResourcePublishOpenAPI: {Default: true, PreRelease: featuregate.Beta},
// features that enable backwards compatibility but are scheduled to be removed
// ...


@ -24,10 +24,6 @@ import (
genericfilters "k8s.io/apiserver/pkg/server/filters"
)
// DeprecatedInsecureServingInfo is required to serve http. HTTP does NOT include authentication or authorization.
// You shouldn't be using this. It makes sig-auth sad.
// DeprecatedInsecureServingInfo *ServingInfo
// BuildInsecureHandlerChain sets up the server to listen to http. Should be removed.
func BuildInsecureHandlerChain(apiHandler http.Handler, c *server.Config) http.Handler {
handler := apiHandler


@ -38,7 +38,6 @@ go_library(
"//pkg/apis/core/v1:go_default_library", "//pkg/apis/core/v1:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper:go_default_library",
"//pkg/apis/core/v1/helper/qos:go_default_library", "//pkg/apis/core/v1/helper/qos:go_default_library",
"//pkg/capabilities:go_default_library",
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/fieldpath:go_default_library", "//pkg/fieldpath:go_default_library",
"//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis:go_default_library",
@ -99,13 +98,13 @@ go_library(
"//pkg/scheduler/api:go_default_library", "//pkg/scheduler/api:go_default_library",
"//pkg/security/apparmor:go_default_library", "//pkg/security/apparmor:go_default_library",
"//pkg/security/podsecuritypolicy/sysctl:go_default_library", "//pkg/security/podsecuritypolicy/sysctl:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/dbus:go_default_library", "//pkg/util/dbus:go_default_library",
"//pkg/util/iptables:go_default_library", "//pkg/util/iptables:go_default_library",
"//pkg/util/mount:go_default_library", "//pkg/util/mount:go_default_library",
"//pkg/util/node:go_default_library", "//pkg/util/node:go_default_library",
"//pkg/util/oom:go_default_library", "//pkg/util/oom:go_default_library",
"//pkg/util/removeall:go_default_library", "//pkg/util/removeall:go_default_library",
"//pkg/util/selinux:go_default_library",
"//pkg/util/taints:go_default_library", "//pkg/util/taints:go_default_library",
"//pkg/volume:go_default_library", "//pkg/volume:go_default_library",
"//pkg/volume/csi:go_default_library", "//pkg/volume/csi:go_default_library",
@ -175,7 +174,6 @@ go_test(
embed = [":go_default_library"], embed = [":go_default_library"],
deps = [ deps = [
"//pkg/apis/core/install:go_default_library", "//pkg/apis/core/install:go_default_library",
"//pkg/capabilities:go_default_library",
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/cadvisor/testing:go_default_library", "//pkg/kubelet/cadvisor/testing:go_default_library",


@ -291,12 +291,12 @@ type KubeletConfiguration struct {
/* the following fields are meant for Node Allocatable */
// A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G,pids=100) pairs
// A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G,pid=100) pairs
// that describe resources reserved for non-kubernetes components.
// Currently only cpu and memory are supported.
// See http://kubernetes.io/docs/user-guide/compute-resources for more detail.
SystemReserved map[string]string
// A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G,pids=100) pairs
// A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G,pid=100) pairs
// that describe resources reserved for kubernetes system components.
// Currently cpu, memory and local ephemeral storage for root file system are supported.
// See http://kubernetes.io/docs/user-guide/compute-resources for more detail.
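As a rough illustration of the ResourceName=ResourceQuantity format described above, expressed as the map[string]string these fields hold (the concrete quantities are made up for the example):

```go
package main

import "fmt"

func main() {
	// Illustrative reserved-resource maps matching the comment above.
	systemReserved := map[string]string{
		"cpu":    "200m",
		"memory": "150G",
		"pid":    "100",
	}
	kubeReserved := map[string]string{
		"cpu":    "100m",
		"memory": "100Mi",
	}
	fmt.Println(systemReserved, kubeReserved)
}
```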


@ -166,6 +166,7 @@ filegroup(
"//pkg/kubelet/cm/cpumanager:all-srcs", "//pkg/kubelet/cm/cpumanager:all-srcs",
"//pkg/kubelet/cm/cpuset:all-srcs", "//pkg/kubelet/cm/cpuset:all-srcs",
"//pkg/kubelet/cm/devicemanager:all-srcs", "//pkg/kubelet/cm/devicemanager:all-srcs",
"//pkg/kubelet/cm/topologymanager/socketmask:all-srcs",
"//pkg/kubelet/cm/util:all-srcs", "//pkg/kubelet/cm/util:all-srcs",
], ],
tags = ["automanaged"], tags = ["automanaged"],


@ -21,7 +21,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
// TODO: Migrate kubelet to either use its own internal objects or client library. // TODO: Migrate kubelet to either use its own internal objects or client library.
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
internalapi "k8s.io/cri-api/pkg/apis" internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/config"
@ -104,6 +104,10 @@ type ContainerManager interface {
// GetDevices returns information about the devices assigned to pods and containers
GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
// ShouldResetExtendedResourceCapacity returns whether or not the extended resources should be zeroed,
// due to node recreation.
ShouldResetExtendedResourceCapacity() bool
}
type NodeConfig struct {


@ -34,7 +34,7 @@ import (
"github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/configs"
"k8s.io/klog" "k8s.io/klog"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
utilerrors "k8s.io/apimachinery/pkg/util/errors" utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
@ -897,3 +897,7 @@ func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceLi
func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
return cm.deviceManager.GetDevices(podUID, containerName)
}
func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {
return cm.deviceManager.ShouldResetExtendedResourceCapacity()
}


@ -17,7 +17,7 @@ limitations under the License.
package cm
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/resource"
@ -32,7 +32,9 @@ import (
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
type containerManagerStub struct{}
type containerManagerStub struct {
shouldResetExtendedResourceCapacity bool
}
var _ ContainerManager = &containerManagerStub{}
@ -110,6 +112,14 @@ func (cm *containerManagerStub) GetDevices(_, _ string) []*podresourcesapi.Conta
return nil
}
func NewStubContainerManager() ContainerManager {
return &containerManagerStub{}
func (cm *containerManagerStub) ShouldResetExtendedResourceCapacity() bool {
return cm.shouldResetExtendedResourceCapacity
}
func NewStubContainerManager() ContainerManager {
return &containerManagerStub{shouldResetExtendedResourceCapacity: false}
}
func NewStubContainerManagerWithExtendedResource(shouldResetExtendedResourceCapacity bool) ContainerManager {
return &containerManagerStub{shouldResetExtendedResourceCapacity: shouldResetExtendedResourceCapacity}
}


@ -24,7 +24,7 @@ package cm
import (
"fmt"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
@ -171,3 +171,7 @@ func (cm *containerManagerImpl) GetPodCgroupRoot() string {
func (cm *containerManagerImpl) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
return nil
}
func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {
return false
}


@ -14,6 +14,7 @@ go_library(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library", "//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library",
"//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/apis/pluginregistration/v1:go_default_library",
"//pkg/kubelet/apis/podresources/v1alpha1:go_default_library", "//pkg/kubelet/apis/podresources/v1alpha1:go_default_library",
@ -26,9 +27,11 @@ go_library(
"//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/metrics:go_default_library",
"//pkg/kubelet/util/pluginwatcher:go_default_library", "//pkg/kubelet/util/pluginwatcher:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/selinux:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/klog:go_default_library",
], ],


@ -28,10 +28,12 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
"k8s.io/klog" "k8s.io/klog"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
@ -42,6 +44,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/pkg/kubelet/metrics"
watcher "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" watcher "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/selinux"
) )
// ActivePodsFunc is a function that returns a list of pods to reconcile.
@ -206,6 +209,11 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc
socketPath := filepath.Join(m.socketdir, m.socketname)
os.MkdirAll(m.socketdir, 0755)
if selinux.SELinuxEnabled() {
if err := selinux.SetFileLabel(m.socketdir, config.KubeletPluginsDirSELinuxLabel); err != nil {
klog.Warningf("Unprivileged containerized plugins might not work. Could not set selinux context on %s: %v", m.socketdir, err)
}
}
// Removes all stale sockets in m.socketdir. Device plugins can monitor
// this and use it as a signal to re-register with the new Kubelet.
@ -832,3 +840,17 @@ func (m *ManagerImpl) GetDevices(podUID, containerName string) []*podresourcesap
defer m.mutex.Unlock()
return m.podDevices.getContainerDevices(podUID, containerName)
}
// ShouldResetExtendedResourceCapacity returns whether the extended resources should be zeroed or not,
// depending on whether the node has been recreated. Absence of the checkpoint file strongly indicates the node
// has been recreated.
func (m *ManagerImpl) ShouldResetExtendedResourceCapacity() bool {
if utilfeature.DefaultFeatureGate.Enabled(features.DevicePlugins) {
checkpoints, err := m.checkpointManager.ListCheckpoints()
if err != nil {
return false
}
return len(checkpoints) == 0
}
return false
}
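A hypothetical sketch of how a caller could consume this signal: when the device-plugin checkpoint is gone (node recreated), previously advertised extended resources get zeroed until the plugins re-register. The resource name, the resetter interface, and the reconcile helper below are illustrative, not the kubelet's actual wiring:

```go
package main

import "fmt"

// capacityResetter is a hypothetical narrow view of the container manager.
type capacityResetter interface {
	ShouldResetExtendedResourceCapacity() bool
}

// reconcileCapacity zeroes extended-resource capacity when a reset is signalled.
func reconcileCapacity(cm capacityResetter, capacity map[string]int64) {
	if !cm.ShouldResetExtendedResourceCapacity() {
		return
	}
	for name := range capacity {
		capacity[name] = 0 // stale extended resources are zeroed until plugins re-register
	}
}

type fakeManager struct{ reset bool }

func (f fakeManager) ShouldResetExtendedResourceCapacity() bool { return f.reset }

func main() {
	capacity := map[string]int64{"example.com/gpu": 4}
	reconcileCapacity(fakeManager{reset: true}, capacity)
	fmt.Println(capacity) // map[example.com/gpu:0]
}
```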


@ -17,7 +17,7 @@ limitations under the License.
package devicemanager
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@ -67,3 +67,8 @@ func (h *ManagerStub) GetWatcherHandler() pluginwatcher.PluginHandler {
func (h *ManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices { func (h *ManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
return nil return nil
} }
// ShouldResetExtendedResourceCapacity returns false
func (h *ManagerStub) ShouldResetExtendedResourceCapacity() bool {
return false
}


@@ -27,7 +27,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
@@ -946,6 +946,45 @@ func TestDevicePreStartContainer(t *testing.T) {
as.Equal(len(runContainerOpts.Envs), len(expectedResp.Envs))
}
func TestResetExtendedResource(t *testing.T) {
as := assert.New(t)
tmpDir, err := ioutil.TempDir("", "checkpoint")
as.Nil(err)
ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
as.Nil(err)
testManager := &ManagerImpl{
endpoints: make(map[string]endpointInfo),
healthyDevices: make(map[string]sets.String),
unhealthyDevices: make(map[string]sets.String),
allocatedDevices: make(map[string]sets.String),
podDevices: make(podDevices),
checkpointManager: ckm,
}
extendedResourceName := "domain.com/resource"
testManager.podDevices.insert("pod", "con", extendedResourceName,
constructDevices([]string{"dev1"}),
constructAllocResp(map[string]string{"/dev/dev1": "/dev/dev1"},
map[string]string{"/home/lib1": "/usr/lib1"}, map[string]string{}))
testManager.healthyDevices[extendedResourceName] = sets.NewString()
testManager.healthyDevices[extendedResourceName].Insert("dev1")
// checkpoint is present, indicating node hasn't been recreated
err = testManager.writeCheckpoint()
as.Nil(err)
as.False(testManager.ShouldResetExtendedResourceCapacity())
// checkpoint is absent, representing node recreation
ckpts, err := ckm.ListCheckpoints()
as.Nil(err)
for _, ckpt := range ckpts {
err = ckm.RemoveCheckpoint(ckpt)
as.Nil(err)
}
as.True(testManager.ShouldResetExtendedResourceCapacity())
}
func allocateStubFunc() func(devs []string) (*pluginapi.AllocateResponse, error) {
return func(devs []string) (*pluginapi.AllocateResponse, error) {
resp := new(pluginapi.ContainerAllocateResponse)


@@ -19,7 +19,7 @@ package devicemanager
import (
"time"
- "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -58,6 +58,11 @@ type Manager interface {
// GetDevices returns information about the devices assigned to pods and containers
GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
// ShouldResetExtendedResourceCapacity returns whether the extended resources should be reset or not,
// depending on the checkpoint file availability. Absence of the checkpoint file strongly indicates
// the node has been recreated.
ShouldResetExtendedResourceCapacity() bool
}
// DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices.


@@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["socketmask.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["socketmask_test.go"],
embed = [":go_default_library"],
)


@@ -0,0 +1,152 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package socketmask
import (
"fmt"
)
//SocketMask interface allows hint providers to create SocketMasks for TopologyHints
type SocketMask interface {
Add(sockets ...int) error
Remove(sockets ...int) error
And(masks ...SocketMask)
Or(masks ...SocketMask)
Clear()
Fill()
IsEqual(mask SocketMask) bool
IsEmpty() bool
IsSet(socket int) bool
String() string
Count() int
GetSockets() []int
}
type socketMask uint64
//NewSocketMask creates a new SocketMask
func NewSocketMask(sockets ...int) (SocketMask, error) {
s := socketMask(0)
err := (&s).Add(sockets...)
if err != nil {
return nil, err
}
return &s, nil
}
//Add adds the sockets with topology affinity to the SocketMask
func (s *socketMask) Add(sockets ...int) error {
mask := *s
for _, i := range sockets {
if i < 0 || i >= 64 {
return fmt.Errorf("socket number must be in range 0-63")
}
mask |= 1 << uint64(i)
}
*s = mask
return nil
}
//Remove removes specified sockets from SocketMask
func (s *socketMask) Remove(sockets ...int) error {
mask := *s
for _, i := range sockets {
if i < 0 || i >= 64 {
return fmt.Errorf("socket number must be in range 0-63")
}
mask &^= 1 << uint64(i)
}
*s = mask
return nil
}
//And performs and operation on all bits in masks
func (s *socketMask) And(masks ...SocketMask) {
for _, m := range masks {
*s &= *m.(*socketMask)
}
}
//Or performs or operation on all bits in masks
func (s *socketMask) Or(masks ...SocketMask) {
for _, m := range masks {
*s |= *m.(*socketMask)
}
}
//Clear resets all bits in mask to zero
func (s *socketMask) Clear() {
*s = 0
}
//Fill sets all bits in mask to one
func (s *socketMask) Fill() {
*s = socketMask(^uint64(0))
}
//IsEmpty checks mask to see if all bits are zero
func (s *socketMask) IsEmpty() bool {
return *s == 0
}
//IsSet checks socket in mask to see if bit is set to one
func (s *socketMask) IsSet(socket int) bool {
if socket < 0 || socket >= 64 {
return false
}
return (*s & (1 << uint64(socket))) > 0
}
//IsEqual checks if masks are equal
func (s *socketMask) IsEqual(mask SocketMask) bool {
return *s == *mask.(*socketMask)
}
//String converts mask to string
func (s *socketMask) String() string {
str := ""
for i := uint64(0); i < 64; i++ {
if (*s & (1 << i)) > 0 {
str += "1"
} else {
str += "0"
}
}
return str
}
//Count counts number of bits in mask set to one
func (s *socketMask) Count() int {
count := 0
for i := uint64(0); i < 64; i++ {
if (*s & (1 << i)) > 0 {
count++
}
}
return count
}
//GetSockets returns each socket number with bits set to one
func (s *socketMask) GetSockets() []int {
var sockets []int
for i := uint64(0); i < 64; i++ {
if (*s & (1 << i)) > 0 {
sockets = append(sockets, int(i))
}
}
return sockets
}
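Since socketmask.go is new in this change, a short usage sketch may help. It assumes the package is imported from its in-tree import path (as declared in the BUILD file above) and simply exercises the interface defined here:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask"
)

func main() {
	// Hints from two providers: one can satisfy a request on sockets 0 and 1,
	// the other only on socket 0.
	a, _ := socketmask.NewSocketMask(0, 1)
	b, _ := socketmask.NewSocketMask(0)

	// Intersect the hints; only socket 0 stays set.
	a.And(b)
	fmt.Println(a.GetSockets()) // [0]
	fmt.Println(a.Count())      // 1
	fmt.Println(a.IsSet(1))     // false

	// Sockets outside 0-63 are rejected by Add (and therefore by NewSocketMask).
	if _, err := socketmask.NewSocketMask(64); err != nil {
		fmt.Println("error:", err)
	}
}
```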


@@ -0,0 +1,290 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package socketmask
import (
"reflect"
"testing"
)
func TestNewSocketMask(t *testing.T) {
tcases := []struct {
name string
socket int
expectedMask string
}{
{
name: "New SocketMask with socket 0 set",
socket: int(0),
expectedMask: "1000000000000000000000000000000000000000000000000000000000000000",
},
}
for _, tc := range tcases {
sm, _ := NewSocketMask(0)
if sm.String() != tc.expectedMask {
t.Errorf("Expected mask to be %v, got %v", tc.expectedMask, sm)
}
}
}
func TestAdd(t *testing.T) {
tcases := []struct {
name string
firstSocket int
secondSocket int
expectedMask string
}{
{
name: "Reset bit 1 SocketMask to 0",
firstSocket: 0,
secondSocket: 1,
expectedMask: "1100000000000000000000000000000000000000000000000000000000000000",
},
}
for _, tc := range tcases {
mask, _ := NewSocketMask()
mask.Add(tc.firstSocket, tc.secondSocket)
if mask.String() != tc.expectedMask {
t.Errorf("Expected mask to be %v, got %v", tc.expectedMask, mask)
}
}
}
func TestRemove(t *testing.T) {
tcases := []struct {
name string
firstSocketSet int
secondSocketSet int
firstSocketRemove int
expectedMask string
}{
{
name: "Reset bit 1 SocketMask to 0",
firstSocketSet: 0,
secondSocketSet: 1,
firstSocketRemove: 0,
expectedMask: "0100000000000000000000000000000000000000000000000000000000000000",
},
}
for _, tc := range tcases {
mask, _ := NewSocketMask(tc.firstSocketSet, tc.secondSocketSet)
mask.Remove(tc.firstSocketRemove)
if mask.String() != tc.expectedMask {
t.Errorf("Expected mask to be %v, got %v", tc.expectedMask, mask)
}
}
}
func TestAnd(t *testing.T) {
tcases := []struct {
name string
firstMaskBit int
secondMaskBit int
andMask string
}{
{
name: "And socket masks",
firstMaskBit: 0,
secondMaskBit: 0,
andMask: "1000000000000000000000000000000000000000000000000000000000000000",
},
}
for _, tc := range tcases {
firstMask, _ := NewSocketMask(tc.firstMaskBit)
secondMask, _ := NewSocketMask(tc.secondMaskBit)
firstMask.And(secondMask)
if firstMask.String() != string(tc.andMask) {
t.Errorf("Expected mask to be %v, got %v", tc.andMask, firstMask)
}
}
}
func TestOr(t *testing.T) {
tcases := []struct {
name string
firstMaskBit int
secondMaskBit int
orMask string
}{
{
name: "Or socket masks",
firstMaskBit: int(0),
secondMaskBit: int(1),
orMask: "1100000000000000000000000000000000000000000000000000000000000000",
},
}
for _, tc := range tcases {
firstMask, _ := NewSocketMask(tc.firstMaskBit)
secondMask, _ := NewSocketMask(tc.secondMaskBit)
firstMask.Or(secondMask)
if firstMask.String() != string(tc.orMask) {
t.Errorf("Expected mask to be %v, got %v", tc.orMask, firstMask)
}
}
}
func TestClear(t *testing.T) {
tcases := []struct {
name string
firstBit int
secondBit int
clearedMask string
}{
{
name: "Clear socket masks",
firstBit: int(0),
secondBit: int(1),
clearedMask: "0000000000000000000000000000000000000000000000000000000000000000",
},
}
for _, tc := range tcases {
mask, _ := NewSocketMask(tc.firstBit, tc.secondBit)
mask.Clear()
if mask.String() != string(tc.clearedMask) {
t.Errorf("Expected mask to be %v, got %v", tc.clearedMask, mask)
}
}
}
func TestFill(t *testing.T) {
tcases := []struct {
name string
filledMask string
}{
{
name: "Fill socket masks",
filledMask: "1111111111111111111111111111111111111111111111111111111111111111",
},
}
for _, tc := range tcases {
mask, _ := NewSocketMask()
mask.Fill()
if mask.String() != string(tc.filledMask) {
t.Errorf("Expected mask to be %v, got %v", tc.filledMask, mask)
}
}
}
func TestIsEmpty(t *testing.T) {
tcases := []struct {
name string
maskBit int
expectedEmpty bool
}{
{
name: "Check if mask is empty",
maskBit: int(0),
expectedEmpty: false,
},
}
for _, tc := range tcases {
mask, _ := NewSocketMask(tc.maskBit)
empty := mask.IsEmpty()
if empty {
t.Errorf("Expected value to be %v, got %v", tc.expectedEmpty, empty)
}
}
}
func TestIsSet(t *testing.T) {
tcases := []struct {
name string
maskBit int
expectedSet bool
}{
{
name: "Check if mask bit is set",
maskBit: int(0),
expectedSet: true,
},
}
for _, tc := range tcases {
mask, _ := NewSocketMask(tc.maskBit)
set := mask.IsSet(tc.maskBit)
if !set {
t.Errorf("Expected value to be %v, got %v", tc.expectedSet, set)
}
}
}
func TestIsEqual(t *testing.T) {
tcases := []struct {
name string
firstMaskBit int
secondMaskBit int
isEqual bool
}{
{
name: "And socket masks",
firstMaskBit: int(0),
secondMaskBit: int(0),
isEqual: true,
},
}
for _, tc := range tcases {
firstMask, _ := NewSocketMask(tc.firstMaskBit)
secondMask, _ := NewSocketMask(tc.secondMaskBit)
isEqual := firstMask.IsEqual(secondMask)
if !isEqual {
t.Errorf("Expected mask to be %v, got %v", tc.isEqual, isEqual)
}
}
}
func TestCount(t *testing.T) {
tcases := []struct {
name string
maskBit int
expectedCount int
}{
{
name: "Count number of bits set in full mask",
maskBit: 0,
expectedCount: 1,
},
}
for _, tc := range tcases {
mask, _ := NewSocketMask(tc.maskBit)
count := mask.Count()
if count != tc.expectedCount {
t.Errorf("Expected value to be %v, got %v", tc.expectedCount, count)
}
}
}
func TestGetSockets(t *testing.T) {
tcases := []struct {
name string
firstSocket int
secondSocket int
expectedSockets []int
}{
{
name: "Get number of each socket which has been set",
firstSocket: 0,
secondSocket: 1,
expectedSockets: []int{0, 1},
},
}
for _, tc := range tcases {
mask, _ := NewSocketMask(tc.firstSocket, tc.secondSocket)
sockets := mask.GetSockets()
if !reflect.DeepEqual(sockets, tc.expectedSockets) {
t.Errorf("Expected value to be %v, got %v", tc.expectedSockets, sockets)
}
}
}


@@ -26,4 +26,5 @@ const (
DefaultKubeletContainersDirName = "containers"
DefaultKubeletPluginContainersDirName = "plugin-containers"
DefaultKubeletPodResourcesDirName = "pod-resources"
KubeletPluginsDirSELinuxLabel = "system_u:object_r:container_file_t:s0"
)
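The new KubeletPluginsDirSELinuxLabel constant is what the kubelet now applies to its plugin directories so that unprivileged, containerized plugins can create their sockets there. A minimal sketch of applying the same label with the helpers this change relies on; the /tmp path is illustrative only:

```go
package main

import (
	"log"
	"os"

	"k8s.io/kubernetes/pkg/util/selinux"
)

func main() {
	// Stand-in for a kubelet plugin directory such as /var/lib/kubelet/plugins_registry;
	// the path used here is illustrative only.
	dir := "/tmp/plugins_registry"
	if err := os.MkdirAll(dir, 0755); err != nil {
		log.Fatal(err)
	}

	// Same label the kubelet now applies, so unprivileged containerized plugins
	// can create their registration sockets under the directory.
	const label = "system_u:object_r:container_file_t:s0"

	if selinux.SELinuxEnabled() {
		if err := selinux.SetFileLabel(dir, label); err != nil {
			log.Printf("could not set SELinux context on %s: %v", dir, err)
		}
	}
}
```

On a node, the manual equivalent is roughly a `chcon -t container_file_t` on the directory.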


@@ -30,7 +30,6 @@ import (
dockercontainer "github.com/docker/docker/api/types/container"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
- "k8s.io/kubernetes/pkg/kubelet/kuberuntime"
)
type containerCleanupInfo struct {
@@ -54,7 +53,7 @@ func (ds *dockerService) applyPlatformSpecificDockerConfig(request *runtimeapi.C
return cleanupInfo, nil
}
- // applyGMSAConfig looks at the kuberuntime.GMSASpecContainerAnnotationKey container annotation; if present,
// applyGMSAConfig looks at the container's .Windows.SecurityContext.GMSACredentialSpec field; if present,
// it copies its contents to a unique registry value, and sets a SecurityOpt on the config pointing to that registry value.
// We use registry values instead of files since their location cannot change - as opposed to credential spec files,
// whose location could potentially change down the line, or even be unknown (eg if docker is not installed on the
@@ -63,7 +62,10 @@ func (ds *dockerService) applyPlatformSpecificDockerConfig(request *runtimeapi.C
// as it will avoid cluttering the registry - there is a moby PR out for this:
// https://github.com/moby/moby/pull/38777
func applyGMSAConfig(config *runtimeapi.ContainerConfig, createConfig *dockertypes.ContainerCreateConfig, cleanupInfo *containerCleanupInfo) error {
- credSpec := config.Annotations[kuberuntime.GMSASpecContainerAnnotationKey]
var credSpec string
if config.Windows != nil && config.Windows.SecurityContext != nil {
credSpec = config.Windows.SecurityContext.CredentialSpec
}
if credSpec == "" {
return nil
}
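For reference, the new lookup reads the GMSA credential spec from the CRI Windows security context instead of a pod annotation. The sketch below mirrors that field access; the SHA-256 digest used to build the "k8s-cred-spec-<hex>" registry value name is an assumption made for illustration (the test below only expects a name of that shape, and the exact digest is not shown in this diff):

```go
package main

import (
	"crypto/sha256"
	"fmt"

	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)

// credSpecFromConfig mirrors the new lookup: the GMSA credential spec comes from
// the CRI WindowsContainerSecurityContext field rather than a pod annotation.
func credSpecFromConfig(config *runtimeapi.ContainerConfig) string {
	if config.Windows != nil && config.Windows.SecurityContext != nil {
		return config.Windows.SecurityContext.CredentialSpec
	}
	return ""
}

func main() {
	cfg := &runtimeapi.ContainerConfig{
		Windows: &runtimeapi.WindowsContainerConfig{
			SecurityContext: &runtimeapi.WindowsContainerSecurityContext{
				CredentialSpec: `{"CmsPlugins":["ActiveDirectory"]}`,
			},
		},
	}
	spec := credSpecFromConfig(cfg)

	// The registry value name in the test has the shape "k8s-cred-spec-<hex>";
	// hashing the spec contents with SHA-256 here is an assumption for illustration.
	valueName := fmt.Sprintf("k8s-cred-spec-%x", sha256.Sum256([]byte(spec)))
	fmt.Println(valueName)
}
```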


@@ -73,7 +73,11 @@ func TestApplyGMSAConfig(t *testing.T) {
expectedValueName := "k8s-cred-spec-" + expectedHex
containerConfigWithGMSAAnnotation := &runtimeapi.ContainerConfig{
- Annotations: map[string]string{"container.alpha.windows.kubernetes.io/gmsa-credential-spec": dummyCredSpec},
Windows: &runtimeapi.WindowsContainerConfig{
SecurityContext: &runtimeapi.WindowsContainerSecurityContext{
CredentialSpec: dummyCredSpec,
},
},
}
t.Run("happy path", func(t *testing.T) {


@@ -96,7 +96,7 @@ func (ds *dockerService) RunPodSandbox(ctx context.Context, r *runtimeapi.RunPod
}
// Step 2: Create the sandbox container.
- if r.GetRuntimeHandler() != "" {
if r.GetRuntimeHandler() != "" && r.GetRuntimeHandler() != runtimeName {
return nil, fmt.Errorf("RuntimeHandler %q not supported", r.GetRuntimeHandler())
}
createConfig, err := ds.makeSandboxDockerConfig(config, image)
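The relaxed check above accepts an empty RuntimeHandler or one naming the Docker runtime itself and rejects everything else. A standalone sketch of the same guard, with the runtime name "docker" assumed here for illustration:

```go
package main

import "fmt"

// validateRuntimeHandler mirrors the relaxed sandbox check: an empty handler or
// one that matches this runtime's own name is accepted; anything else is rejected.
// The runtime name "docker" is assumed here for the dockershim case.
func validateRuntimeHandler(handler, runtimeName string) error {
	if handler != "" && handler != runtimeName {
		return fmt.Errorf("RuntimeHandler %q not supported", handler)
	}
	return nil
}

func main() {
	for _, h := range []string{"", "docker", "kata"} {
		if err := validateRuntimeHandler(h, "docker"); err != nil {
			fmt.Printf("handler %q rejected: %v\n", h, err)
		} else {
			fmt.Printf("handler %q accepted\n", h)
		}
	}
}
```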


@@ -110,6 +110,7 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/selinux"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi"
"k8s.io/kubernetes/pkg/volume/util/subpath"
@@ -1047,7 +1048,7 @@ type Kubelet struct {
// as it takes time to gather all necessary node information.
nodeStatusUpdateFrequency time.Duration
- // nodeStatusUpdateFrequency is the frequency that kubelet posts node
// nodeStatusReportFrequency is the frequency that kubelet posts node
// status to master. It is only used when node lease feature is enabled.
nodeStatusReportFrequency time.Duration
@@ -1222,6 +1223,8 @@ type Kubelet struct {
// 4. the pod-resources directory
func (kl *Kubelet) setupDataDirs() error {
kl.rootDirectory = path.Clean(kl.rootDirectory)
pluginRegistrationDir := kl.getPluginsRegistrationDir()
pluginsDir := kl.getPluginsDir()
if err := os.MkdirAll(kl.getRootDir(), 0750); err != nil {
return fmt.Errorf("error creating root directory: %v", err)
}
@@ -1240,6 +1243,16 @@ func (kl *Kubelet) setupDataDirs() error {
if err := os.MkdirAll(kl.getPodResourcesDir(), 0750); err != nil {
return fmt.Errorf("error creating podresources directory: %v", err)
}
if selinux.SELinuxEnabled() {
err := selinux.SetFileLabel(pluginRegistrationDir, config.KubeletPluginsDirSELinuxLabel)
if err != nil {
klog.Warningf("Unprivileged containerized plugins might not work. Could not set selinux context on %s: %v", pluginRegistrationDir, err)
}
err = selinux.SetFileLabel(pluginsDir, config.KubeletPluginsDirSELinuxLabel)
if err != nil {
klog.Warningf("Unprivileged containerized plugins might not work. Could not set selinux context on %s: %v", pluginsDir, err)
}
}
return nil
}
@@ -1782,15 +1795,6 @@ func (kl *Kubelet) canRunPod(pod *v1.Pod) lifecycle.PodAdmitResult {
}
}
- // TODO: Refactor as a soft admit handler.
- if err := canRunPod(pod); err != nil {
- return lifecycle.PodAdmitResult{
- Admit: false,
- Reason: "Forbidden",
- Message: err.Error(),
- }
- }
return lifecycle.PodAdmitResult{Admit: true}
}
@@ -1841,7 +1845,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
// 1. configCh: a channel to read config events from
// 2. handler: the SyncHandler to dispatch pods to
// 3. syncCh: a channel to read periodic sync events from
- // 4. houseKeepingCh: a channel to read housekeeping events from
// 4. housekeepingCh: a channel to read housekeeping events from
// 5. plegCh: a channel to read PLEG updates from
//
// Events are also read from the kubelet liveness manager's update channel.
@@ -1863,7 +1867,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
// handler callback for the event type
// * plegCh: update the runtime cache; sync pod
// * syncCh: sync all pods waiting for sync
- // * houseKeepingCh: trigger cleanup of pods
// * housekeepingCh: trigger cleanup of pods
// * liveness manager: sync pods that have failed or in which one or more
// containers have failed liveness checks
func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handler SyncHandler,


@@ -159,6 +159,11 @@ func (kl *Kubelet) getPodResourcesDir() string {
return filepath.Join(kl.getRootDir(), config.DefaultKubeletPodResourcesDirName)
}
// getPluginsDirSELinuxLabel returns the selinux label to be applied on plugin directories
func (kl *Kubelet) getPluginsDirSELinuxLabel() string {
return config.KubeletPluginsDirSELinuxLabel
}
// GetPods returns all pods bound to the kubelet and their spec, and the mirror
// pods.
func (kl *Kubelet) GetPods() []*v1.Pod {


@@ -26,7 +26,7 @@ import (
"k8s.io/klog"
- "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -132,12 +132,15 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
// Zeros out extended resource capacity during reconciliation.
func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool {
requiresUpdate := false
- for k := range node.Status.Capacity {
- if v1helper.IsExtendedResourceName(k) {
- klog.Infof("Zero out resource %s capacity in existing node.", k)
- node.Status.Capacity[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
- node.Status.Allocatable[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
- requiresUpdate = true
// Check with the device manager to see if node has been recreated, in which case extended resources should be zeroed until they are available
if kl.containerManager.ShouldResetExtendedResourceCapacity() {
for k := range node.Status.Capacity {
if v1helper.IsExtendedResourceName(k) {
klog.Infof("Zero out resource %s capacity in existing node.", k)
node.Status.Capacity[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
node.Status.Allocatable[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
requiresUpdate = true
}
}
}
return requiresUpdate
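To see what the zeroing does, the snippet below applies the same treatment to a standalone ResourceList. The "any name with a domain prefix outside kubernetes.io/" test is a simplification standing in for v1helper.IsExtendedResourceName, and the example.com/gpu resource is made up for the example:

```go
package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// zeroExtended sets every extended (vendor-prefixed) resource in the list to 0,
// mirroring what reconcileExtendedResource does to node.Status.Capacity and
// node.Status.Allocatable when the device manager reports a recreated node.
// Treating any name with a domain prefix outside kubernetes.io/ as "extended"
// is a simplification of v1helper.IsExtendedResourceName.
func zeroExtended(rl v1.ResourceList) bool {
	updated := false
	for name := range rl {
		n := string(name)
		if strings.Contains(n, "/") && !strings.HasPrefix(n, "kubernetes.io/") {
			rl[name] = *resource.NewQuantity(0, resource.DecimalSI)
			updated = true
		}
	}
	return updated
}

func main() {
	capacity := v1.ResourceList{
		v1.ResourceCPU:                     *resource.NewMilliQuantity(2000, resource.DecimalSI),
		v1.ResourceName("example.com/gpu"): *resource.NewQuantity(2, resource.DecimalSI),
	}
	fmt.Println("updated:", zeroExtended(capacity))
	gpu := capacity[v1.ResourceName("example.com/gpu")]
	fmt.Println("example.com/gpu =", gpu.String())
}
```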


@@ -31,7 +31,7 @@ import (
"github.com/stretchr/testify/require"
cadvisorapi "github.com/google/cadvisor/info/v1"
- "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -1737,17 +1737,21 @@ func TestUpdateDefaultLabels(t *testing.T) {
func TestReconcileExtendedResource(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
testKubelet.kubelet.containerManager = cm.NewStubContainerManagerWithExtendedResource(true /* shouldResetExtendedResourceCapacity*/)
testKubeletNoReset := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
extendedResourceName1 := v1.ResourceName("test.com/resource1")
extendedResourceName2 := v1.ResourceName("test.com/resource2")
cases := []struct {
name string
testKubelet *TestKubelet
existingNode *v1.Node
expectedNode *v1.Node
needsUpdate bool
}{
{
name: "no update needed without extended resource",
testKubelet: testKubelet,
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
@@ -1779,7 +1783,41 @@ func TestReconcileExtendedResource(t *testing.T) {
needsUpdate: false,
},
{
- name: "extended resource capacity is zeroed",
name: "extended resource capacity is not zeroed due to presence of checkpoint file",
testKubelet: testKubelet,
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
needsUpdate: false,
},
{
name: "extended resource capacity is zeroed",
testKubelet: testKubeletNoReset,
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{

Some files were not shown because too many files have changed in this diff.