Merge branch 'master' into fluentd-1.1.0
This commit is contained in:
commit
ec187f729c
156
CHANGELOG-1.8.md
156
CHANGELOG-1.8.md
@ -1,52 +1,59 @@
|
||||
<!-- BEGIN MUNGE: GENERATED_TOC -->
|
||||
- [v1.8.6](#v186)
|
||||
- [Downloads for v1.8.6](#downloads-for-v186)
|
||||
- [v1.8.7](#v187)
|
||||
- [Downloads for v1.8.7](#downloads-for-v187)
|
||||
- [Client Binaries](#client-binaries)
|
||||
- [Server Binaries](#server-binaries)
|
||||
- [Node Binaries](#node-binaries)
|
||||
- [Changelog since v1.8.5](#changelog-since-v185)
|
||||
- [Changelog since v1.8.6](#changelog-since-v186)
|
||||
- [Other notable changes](#other-notable-changes)
|
||||
- [v1.8.5](#v185)
|
||||
- [Downloads for v1.8.5](#downloads-for-v185)
|
||||
- [v1.8.6](#v186)
|
||||
- [Downloads for v1.8.6](#downloads-for-v186)
|
||||
- [Client Binaries](#client-binaries-1)
|
||||
- [Server Binaries](#server-binaries-1)
|
||||
- [Node Binaries](#node-binaries-1)
|
||||
- [Changelog since v1.8.4](#changelog-since-v184)
|
||||
- [Changelog since v1.8.5](#changelog-since-v185)
|
||||
- [Other notable changes](#other-notable-changes-1)
|
||||
- [v1.8.4](#v184)
|
||||
- [Downloads for v1.8.4](#downloads-for-v184)
|
||||
- [v1.8.5](#v185)
|
||||
- [Downloads for v1.8.5](#downloads-for-v185)
|
||||
- [Client Binaries](#client-binaries-2)
|
||||
- [Server Binaries](#server-binaries-2)
|
||||
- [Node Binaries](#node-binaries-2)
|
||||
- [Changelog since v1.8.3](#changelog-since-v183)
|
||||
- [Changelog since v1.8.4](#changelog-since-v184)
|
||||
- [Other notable changes](#other-notable-changes-2)
|
||||
- [v1.8.3](#v183)
|
||||
- [Downloads for v1.8.3](#downloads-for-v183)
|
||||
- [v1.8.4](#v184)
|
||||
- [Downloads for v1.8.4](#downloads-for-v184)
|
||||
- [Client Binaries](#client-binaries-3)
|
||||
- [Server Binaries](#server-binaries-3)
|
||||
- [Node Binaries](#node-binaries-3)
|
||||
- [Changelog since v1.8.2](#changelog-since-v182)
|
||||
- [Changelog since v1.8.3](#changelog-since-v183)
|
||||
- [Other notable changes](#other-notable-changes-3)
|
||||
- [v1.8.2](#v182)
|
||||
- [Downloads for v1.8.2](#downloads-for-v182)
|
||||
- [v1.8.3](#v183)
|
||||
- [Downloads for v1.8.3](#downloads-for-v183)
|
||||
- [Client Binaries](#client-binaries-4)
|
||||
- [Server Binaries](#server-binaries-4)
|
||||
- [Node Binaries](#node-binaries-4)
|
||||
- [Changelog since v1.8.1](#changelog-since-v181)
|
||||
- [Changelog since v1.8.2](#changelog-since-v182)
|
||||
- [Other notable changes](#other-notable-changes-4)
|
||||
- [v1.8.1](#v181)
|
||||
- [Downloads for v1.8.1](#downloads-for-v181)
|
||||
- [v1.8.2](#v182)
|
||||
- [Downloads for v1.8.2](#downloads-for-v182)
|
||||
- [Client Binaries](#client-binaries-5)
|
||||
- [Server Binaries](#server-binaries-5)
|
||||
- [Node Binaries](#node-binaries-5)
|
||||
- [Changelog since v1.8.0](#changelog-since-v180)
|
||||
- [Action Required](#action-required)
|
||||
- [Changelog since v1.8.1](#changelog-since-v181)
|
||||
- [Other notable changes](#other-notable-changes-5)
|
||||
- [v1.8.0](#v180)
|
||||
- [Downloads for v1.8.0](#downloads-for-v180)
|
||||
- [v1.8.1](#v181)
|
||||
- [Downloads for v1.8.1](#downloads-for-v181)
|
||||
- [Client Binaries](#client-binaries-6)
|
||||
- [Server Binaries](#server-binaries-6)
|
||||
- [Node Binaries](#node-binaries-6)
|
||||
- [Changelog since v1.8.0](#changelog-since-v180)
|
||||
- [Action Required](#action-required)
|
||||
- [Other notable changes](#other-notable-changes-6)
|
||||
- [v1.8.0](#v180)
|
||||
- [Downloads for v1.8.0](#downloads-for-v180)
|
||||
- [Client Binaries](#client-binaries-7)
|
||||
- [Server Binaries](#server-binaries-7)
|
||||
- [Node Binaries](#node-binaries-7)
|
||||
- [Introduction to v1.8.0](#introduction-to-v180)
|
||||
- [Major Themes](#major-themes)
|
||||
- [SIG API Machinery](#sig-api-machinery)
|
||||
@ -107,49 +114,112 @@
|
||||
- [External Dependencies](#external-dependencies)
|
||||
- [v1.8.0-rc.1](#v180-rc1)
|
||||
- [Downloads for v1.8.0-rc.1](#downloads-for-v180-rc1)
|
||||
- [Client Binaries](#client-binaries-7)
|
||||
- [Server Binaries](#server-binaries-7)
|
||||
- [Node Binaries](#node-binaries-7)
|
||||
- [Changelog since v1.8.0-beta.1](#changelog-since-v180-beta1)
|
||||
- [Action Required](#action-required-1)
|
||||
- [Other notable changes](#other-notable-changes-6)
|
||||
- [v1.8.0-beta.1](#v180-beta1)
|
||||
- [Downloads for v1.8.0-beta.1](#downloads-for-v180-beta1)
|
||||
- [Client Binaries](#client-binaries-8)
|
||||
- [Server Binaries](#server-binaries-8)
|
||||
- [Node Binaries](#node-binaries-8)
|
||||
- [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3)
|
||||
- [Action Required](#action-required-2)
|
||||
- [Changelog since v1.8.0-beta.1](#changelog-since-v180-beta1)
|
||||
- [Action Required](#action-required-1)
|
||||
- [Other notable changes](#other-notable-changes-7)
|
||||
- [v1.8.0-alpha.3](#v180-alpha3)
|
||||
- [Downloads for v1.8.0-alpha.3](#downloads-for-v180-alpha3)
|
||||
- [v1.8.0-beta.1](#v180-beta1)
|
||||
- [Downloads for v1.8.0-beta.1](#downloads-for-v180-beta1)
|
||||
- [Client Binaries](#client-binaries-9)
|
||||
- [Server Binaries](#server-binaries-9)
|
||||
- [Node Binaries](#node-binaries-9)
|
||||
- [Changelog since v1.8.0-alpha.2](#changelog-since-v180-alpha2)
|
||||
- [Action Required](#action-required-3)
|
||||
- [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3)
|
||||
- [Action Required](#action-required-2)
|
||||
- [Other notable changes](#other-notable-changes-8)
|
||||
- [v1.8.0-alpha.2](#v180-alpha2)
|
||||
- [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2)
|
||||
- [v1.8.0-alpha.3](#v180-alpha3)
|
||||
- [Downloads for v1.8.0-alpha.3](#downloads-for-v180-alpha3)
|
||||
- [Client Binaries](#client-binaries-10)
|
||||
- [Server Binaries](#server-binaries-10)
|
||||
- [Node Binaries](#node-binaries-10)
|
||||
- [Changelog since v1.7.0](#changelog-since-v170)
|
||||
- [Action Required](#action-required-4)
|
||||
- [Changelog since v1.8.0-alpha.2](#changelog-since-v180-alpha2)
|
||||
- [Action Required](#action-required-3)
|
||||
- [Other notable changes](#other-notable-changes-9)
|
||||
- [v1.8.0-alpha.1](#v180-alpha1)
|
||||
- [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1)
|
||||
- [v1.8.0-alpha.2](#v180-alpha2)
|
||||
- [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2)
|
||||
- [Client Binaries](#client-binaries-11)
|
||||
- [Server Binaries](#server-binaries-11)
|
||||
- [Node Binaries](#node-binaries-11)
|
||||
- [Changelog since v1.7.0](#changelog-since-v170)
|
||||
- [Action Required](#action-required-4)
|
||||
- [Other notable changes](#other-notable-changes-10)
|
||||
- [v1.8.0-alpha.1](#v180-alpha1)
|
||||
- [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1)
|
||||
- [Client Binaries](#client-binaries-12)
|
||||
- [Server Binaries](#server-binaries-12)
|
||||
- [Node Binaries](#node-binaries-12)
|
||||
- [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4)
|
||||
- [Action Required](#action-required-5)
|
||||
- [Other notable changes](#other-notable-changes-10)
|
||||
- [Other notable changes](#other-notable-changes-11)
|
||||
<!-- END MUNGE: GENERATED_TOC -->
|
||||
|
||||
<!-- NEW RELEASE NOTES ENTRY -->
|
||||
|
||||
|
||||
# v1.8.7
|
||||
|
||||
[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples)
|
||||
|
||||
## Downloads for v1.8.7
|
||||
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes.tar.gz) | `39389e6bc459e96af44dbca38697a14fa292a66e5d5b82cced2ed5cd321b3793`
|
||||
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-src.tar.gz) | `9b9ecc3a6f4b5681038742744e70d1a89ce6fb829106118710df93ff9a69558b`
|
||||
|
||||
### Client Binaries
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-darwin-386.tar.gz) | `4f5517d5c1a13921f818e76e7d9639744d166d9289196465f6811bfd6bebb7ee`
|
||||
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-darwin-amd64.tar.gz) | `608a5a88fed518a378f4f30b2bb1743def2366eb99b11825123f9c6ec8117f5e`
|
||||
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-386.tar.gz) | `e4e13b177f313050a68f17793eaf314c53501f7b5225aaa6a5da516ac46b6726`
|
||||
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-amd64.tar.gz) | `b5bd43f15fb091959fd6b4cff739b24da3194d26ed598d512adbd4b59d6a0eaa`
|
||||
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-arm.tar.gz) | `0856ad62860ecedc327cb5162617c4cd3af3f40cd8308fccf0491259da5e5199`
|
||||
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-arm64.tar.gz) | `8c5afcb917fff4c9e927609580cb211d7daa6b7c40b2e4d67766df65b47c9883`
|
||||
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-ppc64le.tar.gz) | `3380e8a50330efa8e626c65ccc5dadcd79c6acacfadb00bb0845271eaf6091b1`
|
||||
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-s390x.tar.gz) | `1ba97be9f269579c2b004a898036a4d4acb7f12455c1bf43d6ab4cd7cb6e1718`
|
||||
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-windows-386.tar.gz) | `1c7718117647e0940e007e1383b20ca438068fc74e42eb017529c6e7ec0c5bfa`
|
||||
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-windows-amd64.tar.gz) | `a962223bd349b58f85e86b91d559a3a55ffa48c17322ccc3cf35cf215b5f8633`
|
||||
|
||||
### Server Binaries
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-server-linux-amd64.tar.gz) | `ea3df45a3cd573ba7d1a6d7fcddaf9a2812243560d591f7ba6a497f0467b18b8`
|
||||
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-server-linux-arm.tar.gz) | `8e4a67569e4182ffe623419b9a16d078f3a3f48f592993e83f25cc08fefd4b3d`
|
||||
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-server-linux-arm64.tar.gz) | `1fca5b099a180a733cad9a382604d69b9b1a63a4b2bbd40e32d54871f3f06489`
|
||||
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-server-linux-ppc64le.tar.gz) | `9233ed62830b505abebf6d0c120a9aa1a3eb1fe70cd7750d60552ca9ec0e4f7d`
|
||||
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-server-linux-s390x.tar.gz) | `2ec3385847af78e66b18b1fcf9de7c75c4af26f44c07dfbb37d5d793578a7595`
|
||||
|
||||
### Node Binaries
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-linux-amd64.tar.gz) | `79ee543a9c2636f1491715739c3c54cb70ae5b215fe5ce3345e6ff92759ace72`
|
||||
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-linux-arm.tar.gz) | `60c40066bd1b9a6996371a47d1113a7ef30295e9ea37f738cd7ce86cda380516`
|
||||
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-linux-arm64.tar.gz) | `92ee26c0bbb0d016122c38831903ee82d83c33b289463b9f4dc3481e5c096f9c`
|
||||
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-linux-ppc64le.tar.gz) | `965ddb5e7c54975aa5ce35507317f9738db34f799c67e4fc625e150aac7f5c38`
|
||||
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-linux-s390x.tar.gz) | `5e71d983830ab11aff065fe872bea9e9cfc663d62cd9480b4085a2d1bbf8ca95`
|
||||
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-windows-amd64.tar.gz) | `6f364309fd9dc34f7c7bc13d279499fd7c434ce5cfab379f0e9848e5fab497e0`
|
||||
|
||||
## Changelog since v1.8.6
|
||||
|
||||
### Other notable changes
|
||||
|
||||
* fix device name change issue for azure disk: add remount logic ([#57953](https://github.com/kubernetes/kubernetes/pull/57953), [@andyzhangx](https://github.com/andyzhangx))
|
||||
* GCE: Allows existing internal load balancers to continue using an outdated subnetwork ([#57861](https://github.com/kubernetes/kubernetes/pull/57861), [@nicksardo](https://github.com/nicksardo))
|
||||
* fix azure disk not available issue when device name changed ([#57549](https://github.com/kubernetes/kubernetes/pull/57549), [@andyzhangx](https://github.com/andyzhangx))
|
||||
* Allow kubernetes components to react to SIGTERM signal and shutdown gracefully. ([#57756](https://github.com/kubernetes/kubernetes/pull/57756), [@mborsz](https://github.com/mborsz))
|
||||
* fix incorrect error info when creating an azure file PVC failed ([#56550](https://github.com/kubernetes/kubernetes/pull/56550), [@andyzhangx](https://github.com/andyzhangx))
|
||||
* GCE: Fixes ILB creation on automatic networks with manually created subnetworks. ([#57351](https://github.com/kubernetes/kubernetes/pull/57351), [@nicksardo](https://github.com/nicksardo))
|
||||
* Configurable liveness probe initial delays for etcd and kube-apiserver in GCE ([#57749](https://github.com/kubernetes/kubernetes/pull/57749), [@wojtek-t](https://github.com/wojtek-t))
|
||||
* Fixes a bug where if an error was returned that was not an `autorest.DetailedError` we would return `"not found", nil` which caused nodes to go to `NotReady` state. ([#57484](https://github.com/kubernetes/kubernetes/pull/57484), [@brendandburns](https://github.com/brendandburns))
|
||||
|
||||
|
||||
|
||||
# v1.8.6
|
||||
|
||||
[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples)
|
||||
@ -807,7 +877,7 @@ Consider the following changes, limitations, and guidelines before you upgrade:
|
||||
|
||||
* Advanced auditing is the default auditing mechanism at `v1beta1`. The new version introduces the following changes:
|
||||
|
||||
* The `--audit-policy-file` option is required if the `AdvancedAudit` feature is not explicitly turned off (`--feature-gates=AdvancedAudit=false`) on the API server.
|
||||
* The `--audit-policy-file` option is required if the `AdvancedAuditing` feature is not explicitly turned off (`--feature-gates=AdvancedAuditing=false`) on the API server.
|
||||
* The audit log file defaults to JSON encoding when using the advanced auditing feature gate.
|
||||
* An audit policy file without either an `apiVersion` or a `kind` field may be treated as invalid.
|
||||
* The webhook and log file now output the `v1beta1` event format.
|
||||
|
172
CHANGELOG-1.9.md
172
CHANGELOG-1.9.md
@ -1,16 +1,23 @@
|
||||
<!-- BEGIN MUNGE: GENERATED_TOC -->
|
||||
- [v1.9.1](#v191)
|
||||
- [Downloads for v1.9.1](#downloads-for-v191)
|
||||
- [v1.9.2](#v192)
|
||||
- [Downloads for v1.9.2](#downloads-for-v192)
|
||||
- [Client Binaries](#client-binaries)
|
||||
- [Server Binaries](#server-binaries)
|
||||
- [Node Binaries](#node-binaries)
|
||||
- [Changelog since v1.9.0](#changelog-since-v190)
|
||||
- [Changelog since v1.9.1](#changelog-since-v191)
|
||||
- [Other notable changes](#other-notable-changes)
|
||||
- [v1.9.0](#v190)
|
||||
- [Downloads for v1.9.0](#downloads-for-v190)
|
||||
- [v1.9.1](#v191)
|
||||
- [Downloads for v1.9.1](#downloads-for-v191)
|
||||
- [Client Binaries](#client-binaries-1)
|
||||
- [Server Binaries](#server-binaries-1)
|
||||
- [Node Binaries](#node-binaries-1)
|
||||
- [Changelog since v1.9.0](#changelog-since-v190)
|
||||
- [Other notable changes](#other-notable-changes-1)
|
||||
- [v1.9.0](#v190)
|
||||
- [Downloads for v1.9.0](#downloads-for-v190)
|
||||
- [Client Binaries](#client-binaries-2)
|
||||
- [Server Binaries](#server-binaries-2)
|
||||
- [Node Binaries](#node-binaries-2)
|
||||
- [1.9 Release Notes](#19-release-notes)
|
||||
- [WARNING: etcd backup strongly recommended](#warning-etcd-backup-strongly-recommended)
|
||||
- [Introduction to 1.9.0](#introduction-to-190)
|
||||
@ -98,48 +105,117 @@
|
||||
- [External Dependencies](#external-dependencies)
|
||||
- [v1.9.0-beta.2](#v190-beta2)
|
||||
- [Downloads for v1.9.0-beta.2](#downloads-for-v190-beta2)
|
||||
- [Client Binaries](#client-binaries-2)
|
||||
- [Server Binaries](#server-binaries-2)
|
||||
- [Node Binaries](#node-binaries-2)
|
||||
- [Changelog since v1.9.0-beta.1](#changelog-since-v190-beta1)
|
||||
- [Other notable changes](#other-notable-changes-1)
|
||||
- [v1.9.0-beta.1](#v190-beta1)
|
||||
- [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1)
|
||||
- [Client Binaries](#client-binaries-3)
|
||||
- [Server Binaries](#server-binaries-3)
|
||||
- [Node Binaries](#node-binaries-3)
|
||||
- [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3)
|
||||
- [Action Required](#action-required)
|
||||
- [Changelog since v1.9.0-beta.1](#changelog-since-v190-beta1)
|
||||
- [Other notable changes](#other-notable-changes-2)
|
||||
- [v1.9.0-alpha.3](#v190-alpha3)
|
||||
- [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3)
|
||||
- [v1.9.0-beta.1](#v190-beta1)
|
||||
- [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1)
|
||||
- [Client Binaries](#client-binaries-4)
|
||||
- [Server Binaries](#server-binaries-4)
|
||||
- [Node Binaries](#node-binaries-4)
|
||||
- [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2)
|
||||
- [Action Required](#action-required-1)
|
||||
- [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3)
|
||||
- [Action Required](#action-required)
|
||||
- [Other notable changes](#other-notable-changes-3)
|
||||
- [v1.9.0-alpha.2](#v190-alpha2)
|
||||
- [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2)
|
||||
- [v1.9.0-alpha.3](#v190-alpha3)
|
||||
- [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3)
|
||||
- [Client Binaries](#client-binaries-5)
|
||||
- [Server Binaries](#server-binaries-5)
|
||||
- [Node Binaries](#node-binaries-5)
|
||||
- [Changelog since v1.8.0](#changelog-since-v180)
|
||||
- [Action Required](#action-required-2)
|
||||
- [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2)
|
||||
- [Action Required](#action-required-1)
|
||||
- [Other notable changes](#other-notable-changes-4)
|
||||
- [v1.9.0-alpha.1](#v190-alpha1)
|
||||
- [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1)
|
||||
- [v1.9.0-alpha.2](#v190-alpha2)
|
||||
- [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2)
|
||||
- [Client Binaries](#client-binaries-6)
|
||||
- [Server Binaries](#server-binaries-6)
|
||||
- [Node Binaries](#node-binaries-6)
|
||||
- [Changelog since v1.8.0](#changelog-since-v180)
|
||||
- [Action Required](#action-required-2)
|
||||
- [Other notable changes](#other-notable-changes-5)
|
||||
- [v1.9.0-alpha.1](#v190-alpha1)
|
||||
- [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1)
|
||||
- [Client Binaries](#client-binaries-7)
|
||||
- [Server Binaries](#server-binaries-7)
|
||||
- [Node Binaries](#node-binaries-7)
|
||||
- [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3)
|
||||
- [Action Required](#action-required-3)
|
||||
- [Other notable changes](#other-notable-changes-5)
|
||||
- [Other notable changes](#other-notable-changes-6)
|
||||
<!-- END MUNGE: GENERATED_TOC -->
|
||||
|
||||
<!-- NEW RELEASE NOTES ENTRY -->
|
||||
|
||||
|
||||
# v1.9.2
|
||||
|
||||
[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples)
|
||||
|
||||
## Downloads for v1.9.2
|
||||
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes.tar.gz) | `7a922d49b1194cb1b59b22cecb4eb1197f7c37250d4326410dc71aa5dc5ec8a2`
|
||||
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-src.tar.gz) | `9f128809cdd442d71a13f7c61c7a0e03e832cf0c068a86184c1bcc9acdb78872`
|
||||
|
||||
### Client Binaries
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-darwin-386.tar.gz) | `37d2dd1b1762f1040699584736bbc1a2392e94779a19061d477786bcce3d3f01`
|
||||
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-darwin-amd64.tar.gz) | `42adc9762b30bfd3648323f9a8f350efeedec08a901997073f6d4244f7a16f78`
|
||||
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-386.tar.gz) | `5dde6c6388353376aaa0bd731b0366d9d2d11baee3746662b008e09d9618d55f`
|
||||
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-amd64.tar.gz) | `c45cf9e9d27b9d1bfc6d26f86856271fec6f8e7007f014597d27668f72f8c349`
|
||||
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-arm64.tar.gz) | `05c3810b00adcdbf7bc67671847f11e287da72f308cc704e5679e83564236fee`
|
||||
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-arm.tar.gz) | `a9421d4627eb9eaa1e46cfd4276943e25b5b80e52db6945f173a2a45782ce42d`
|
||||
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-ppc64le.tar.gz) | `adc345ab050e09a3069a47e862c0ce88630a586905b33f6e5fd339005ceffbbf`
|
||||
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-s390x.tar.gz) | `fdff4b462e67569a4a1110b696d8af2c563e0a19e50a58a7b1a4346942b07993`
|
||||
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-windows-386.tar.gz) | `1a82e8e4213153993a6e86e74120f62f95645952b223ed8586316358dd22a225`
|
||||
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-windows-amd64.tar.gz) | `a8648d4d3e0f85597bd57de87459a040ceab4c073d647027a70b0fba8862eab3`
|
||||
|
||||
### Server Binaries
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-server-linux-amd64.tar.gz) | `2218fe0b939273b57ce00c7d5f3f7d2c34ebde5ae500ba2646eea6ba26c7c63d`
|
||||
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-server-linux-arm64.tar.gz) | `3b4bc6cf91c3eaf37ef2b361dd77e838f0a8ca2b8cbb4dd42793c1fea5186b69`
|
||||
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-server-linux-arm.tar.gz) | `73e77da0ddc951f791b5f7b73420ba0dbb141b3637cc48b4e916a41249e40ce3`
|
||||
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-server-linux-ppc64le.tar.gz) | `860ba4ac773e4aff69dde781cac7ac1fb1824f2158155dfa49c50dd3acf0ab82`
|
||||
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-server-linux-s390x.tar.gz) | `19e0fd7863e217b4cb67f91b56ceb5939ae677f523681bdf8ccac174f36f576d`
|
||||
|
||||
### Node Binaries
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-linux-amd64.tar.gz) | `f86b7038dc89d79b277c5fba499f391c25f5aba8f5caa3119c05065f9917b6f9`
|
||||
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-linux-arm64.tar.gz) | `87f40c37a3e359a9350a3bcbe0e27ad6e7dfa0d8ee5f6d2ecf061813423ffa73`
|
||||
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-linux-arm.tar.gz) | `b73d879a03e7eba5543af0b56085ebb4919d401f6a06d4803517ddf606e8240e`
|
||||
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-linux-ppc64le.tar.gz) | `26331e5d84d98fc3a94d2d55fd411159b2a79b6083758cea1dac36a0a4a44336`
|
||||
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-linux-s390x.tar.gz) | `cbf52f3942965bb659d1f0f624e09ff01b2ee9f6e6217b3876c41600e1d4c711`
|
||||
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-windows-amd64.tar.gz) | `70d59046a7c949d4fd4850ee57b1cd44dddfb041c548a21354ee30d7bfb1003d`
|
||||
|
||||
## Changelog since v1.9.1
|
||||
|
||||
### Other notable changes
|
||||
|
||||
* Fixes authentication problem faced during various vSphere operations. ([#57978](https://github.com/kubernetes/kubernetes/pull/57978), [@prashima](https://github.com/prashima))
|
||||
* The getSubnetIDForLB() should return subnet id rather than net id. ([#58208](https://github.com/kubernetes/kubernetes/pull/58208), [@FengyunPan](https://github.com/FengyunPan))
|
||||
* Add cache for VM get operation in azure cloud provider ([#57432](https://github.com/kubernetes/kubernetes/pull/57432), [@karataliu](https://github.com/karataliu))
|
||||
* Update kube-dns to Version 1.14.8 that includes only small changes to how Prometheus metrics are collected. ([#57918](https://github.com/kubernetes/kubernetes/pull/57918), [@rramkumar1](https://github.com/rramkumar1))
|
||||
* Fixes a possible deadlock preventing quota from being recalculated ([#58107](https://github.com/kubernetes/kubernetes/pull/58107), [@ironcladlou](https://github.com/ironcladlou))
|
||||
* Fixes a bug in Heapster deployment for google sink. ([#57902](https://github.com/kubernetes/kubernetes/pull/57902), [@kawych](https://github.com/kawych))
|
||||
* GCE: Allows existing internal load balancers to continue using an outdated subnetwork ([#57861](https://github.com/kubernetes/kubernetes/pull/57861), [@nicksardo](https://github.com/nicksardo))
|
||||
* Update etcd version to 3.1.11 ([#57811](https://github.com/kubernetes/kubernetes/pull/57811), [@xiangpengzhao](https://github.com/xiangpengzhao))
|
||||
* fix device name change issue for azure disk: add remount logic ([#57953](https://github.com/kubernetes/kubernetes/pull/57953), [@andyzhangx](https://github.com/andyzhangx))
|
||||
* calico-node addon tolerates all NoExecute and NoSchedule taints by default. ([#57122](https://github.com/kubernetes/kubernetes/pull/57122), [@caseydavenport](https://github.com/caseydavenport))
|
||||
* Allow kubernetes components to react to SIGTERM signal and shutdown gracefully. ([#57756](https://github.com/kubernetes/kubernetes/pull/57756), [@mborsz](https://github.com/mborsz))
|
||||
* Fixes controller manager crash in certain vSphere cloud provider environment. ([#57286](https://github.com/kubernetes/kubernetes/pull/57286), [@rohitjogvmw](https://github.com/rohitjogvmw))
|
||||
* fix azure disk not available issue when device name changed ([#57549](https://github.com/kubernetes/kubernetes/pull/57549), [@andyzhangx](https://github.com/andyzhangx))
|
||||
* GCE: support passing kube-scheduler policy config via SCHEDULER_POLICY_CONFIG ([#57425](https://github.com/kubernetes/kubernetes/pull/57425), [@yguo0905](https://github.com/yguo0905))
|
||||
|
||||
|
||||
|
||||
# v1.9.1
|
||||
|
||||
[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples)
|
||||
@ -149,44 +225,44 @@
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes.tar.gz) | `0eece0e6c1f68535ea71b58b87e239019bb57fdd61118f3d7defa6bbf4fad5ee`
|
||||
[kubernetes-src.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-src.tar.gz) | `625ebb79412bd12feccf12e8b6a15d9c71ea681b571f34deaa59fe6c9ba55935`
|
||||
[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes.tar.gz) | `0eece0e6c1f68535ea71b58b87e239019bb57fdd61118f3d7defa6bbf4fad5ee`
|
||||
[kubernetes-src.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-src.tar.gz) | `625ebb79412bd12feccf12e8b6a15d9c71ea681b571f34deaa59fe6c9ba55935`
|
||||
|
||||
### Client Binaries
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes-client-darwin-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-darwin-386.tar.gz) | `909556ed9b8445703d0124f2d8c1901b00afaba63a9123a4296be8663c3a2b2d`
|
||||
[kubernetes-client-darwin-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-darwin-amd64.tar.gz) | `71e191d99d3ac1426e23e087b8d0875e793e5615d3aa7ac1e175b250f9707c48`
|
||||
[kubernetes-client-linux-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-386.tar.gz) | `1c4e60c0c056a3300c7fcc9faccd1b1ea2b337e1360c20c5b1c25fdc47923cf0`
|
||||
[kubernetes-client-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-amd64.tar.gz) | `fe8fe40148df404b33069931ea30937699758ed4611ef6baddb4c21b7b19db5e`
|
||||
[kubernetes-client-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-arm64.tar.gz) | `921f5711b97f0b4de69784d9c79f95e80f75a550f28fc1f26597aa0ef6faa471`
|
||||
[kubernetes-client-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-arm.tar.gz) | `77b010cadef98dc832a2f560afe15e57a675ed9fbc59ffad5e19878510997874`
|
||||
[kubernetes-client-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-ppc64le.tar.gz) | `02aa71ddcbe8b711814af7287aac79de5d99c1c143c0d3af5e14b1ff195b8bdc`
|
||||
[kubernetes-client-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-s390x.tar.gz) | `7e315024267306a620045d003785ecc8d7f2e763a6108ae806d5d384aa7552cc`
|
||||
[kubernetes-client-windows-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-windows-386.tar.gz) | `99b2a81b7876498e119db4cb34c434b3790bc41cd882384037c1c1b18cba9f99`
|
||||
[kubernetes-client-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-windows-amd64.tar.gz) | `d89d303cbbf9e57e5a540277158e4d83ad18ca7402b5b54665f1378bb4528599`
|
||||
[kubernetes-client-darwin-386.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-darwin-386.tar.gz) | `909556ed9b8445703d0124f2d8c1901b00afaba63a9123a4296be8663c3a2b2d`
|
||||
[kubernetes-client-darwin-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-darwin-amd64.tar.gz) | `71e191d99d3ac1426e23e087b8d0875e793e5615d3aa7ac1e175b250f9707c48`
|
||||
[kubernetes-client-linux-386.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-386.tar.gz) | `1c4e60c0c056a3300c7fcc9faccd1b1ea2b337e1360c20c5b1c25fdc47923cf0`
|
||||
[kubernetes-client-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-amd64.tar.gz) | `fe8fe40148df404b33069931ea30937699758ed4611ef6baddb4c21b7b19db5e`
|
||||
[kubernetes-client-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-arm64.tar.gz) | `921f5711b97f0b4de69784d9c79f95e80f75a550f28fc1f26597aa0ef6faa471`
|
||||
[kubernetes-client-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-arm.tar.gz) | `77b010cadef98dc832a2f560afe15e57a675ed9fbc59ffad5e19878510997874`
|
||||
[kubernetes-client-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-ppc64le.tar.gz) | `02aa71ddcbe8b711814af7287aac79de5d99c1c143c0d3af5e14b1ff195b8bdc`
|
||||
[kubernetes-client-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-s390x.tar.gz) | `7e315024267306a620045d003785ecc8d7f2e763a6108ae806d5d384aa7552cc`
|
||||
[kubernetes-client-windows-386.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-windows-386.tar.gz) | `99b2a81b7876498e119db4cb34c434b3790bc41cd882384037c1c1b18cba9f99`
|
||||
[kubernetes-client-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-windows-amd64.tar.gz) | `d89d303cbbf9e57e5a540277158e4d83ad18ca7402b5b54665f1378bb4528599`
|
||||
|
||||
### Server Binaries
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes-server-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-amd64.tar.gz) | `5acf2527461419ba883ac352f7c36c3fa0b86a618dbede187054ad90fa233b0e`
|
||||
[kubernetes-server-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-arm64.tar.gz) | `e1f61b4dc6e0c9986e95ec25f876f9a89966215ee8cc7f4a3539ec391b217587`
|
||||
[kubernetes-server-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-arm.tar.gz) | `441c45e16e63e9bdf99887a896a99b3a376af778cb778cc1d0e6afc505237200`
|
||||
[kubernetes-server-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-ppc64le.tar.gz) | `c0175f02180d9c88028ee5ad4e3ea04af8a6741a97f4900b02615f7f83c4d1c5`
|
||||
[kubernetes-server-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-s390x.tar.gz) | `2178150d31197ad7f59d44ffea37d682c2675b3a4ea2fc3fa1eaa0e768b993f7`
|
||||
[kubernetes-server-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-amd64.tar.gz) | `5acf2527461419ba883ac352f7c36c3fa0b86a618dbede187054ad90fa233b0e`
|
||||
[kubernetes-server-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-arm64.tar.gz) | `e1f61b4dc6e0c9986e95ec25f876f9a89966215ee8cc7f4a3539ec391b217587`
|
||||
[kubernetes-server-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-arm.tar.gz) | `441c45e16e63e9bdf99887a896a99b3a376af778cb778cc1d0e6afc505237200`
|
||||
[kubernetes-server-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-ppc64le.tar.gz) | `c0175f02180d9c88028ee5ad4e3ea04af8a6741a97f4900b02615f7f83c4d1c5`
|
||||
[kubernetes-server-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-s390x.tar.gz) | `2178150d31197ad7f59d44ffea37d682c2675b3a4ea2fc3fa1eaa0e768b993f7`
|
||||
|
||||
### Node Binaries
|
||||
|
||||
filename | sha256 hash
|
||||
-------- | -----------
|
||||
[kubernetes-node-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-amd64.tar.gz) | `b8ff0ae693ecca4d55669c66786d6c585f8c77b41a270d65f8175eba8729663a`
|
||||
[kubernetes-node-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-arm64.tar.gz) | `f0f63baaace463dc663c98cbc9a41e52233d1ef33410571ce3f3e78bd485787e`
|
||||
[kubernetes-node-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-arm.tar.gz) | `554bdd11deaf390de85830c7c888dfd4d75d9de8ac147799df12993f27bde905`
|
||||
[kubernetes-node-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-ppc64le.tar.gz) | `913af8ca8b258930e76fd3368acc83608e36e7e270638fa01a6e3be4f682d8bd`
|
||||
[kubernetes-node-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-s390x.tar.gz) | `8192c1c80563230d727fab71514105571afa52cde8520b3d90af58e6daf0e19c`
|
||||
[kubernetes-node-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-windows-amd64.tar.gz) | `4408e6d741c6008044584c0d7235e608c596e836d51346ee773589d9b4589fdc`
|
||||
[kubernetes-node-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-amd64.tar.gz) | `b8ff0ae693ecca4d55669c66786d6c585f8c77b41a270d65f8175eba8729663a`
|
||||
[kubernetes-node-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-arm64.tar.gz) | `f0f63baaace463dc663c98cbc9a41e52233d1ef33410571ce3f3e78bd485787e`
|
||||
[kubernetes-node-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-arm.tar.gz) | `554bdd11deaf390de85830c7c888dfd4d75d9de8ac147799df12993f27bde905`
|
||||
[kubernetes-node-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-ppc64le.tar.gz) | `913af8ca8b258930e76fd3368acc83608e36e7e270638fa01a6e3be4f682d8bd`
|
||||
[kubernetes-node-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-s390x.tar.gz) | `8192c1c80563230d727fab71514105571afa52cde8520b3d90af58e6daf0e19c`
|
||||
[kubernetes-node-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-windows-amd64.tar.gz) | `4408e6d741c6008044584c0d7235e608c596e836d51346ee773589d9b4589fdc`
|
||||
|
||||
## Changelog since v1.9.0
|
||||
|
||||
|
377
Godeps/Godeps.json
generated
377
Godeps/Godeps.json
generated
@ -437,8 +437,13 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/container-storage-interface/spec/lib/go/csi",
|
||||
"Comment": "v0.1.0",
|
||||
"Rev": "9e88e4bfabeca1b8e4810555815f112159292ada"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/console",
|
||||
"Rev": "84eeaae905fa414d03e07bcd6c8d3f19e7cf180e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/services/containers/v1",
|
||||
"Comment": "v1.0.0-beta.2-159-g27d450a",
|
||||
@ -968,6 +973,11 @@
|
||||
"Comment": "v1.0.4",
|
||||
"Rev": "71acacd42f85e5e82f70a55327789582a5200a90"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cyphar/filepath-securejoin",
|
||||
"Comment": "v0.2.1-1-gae69057",
|
||||
"Rev": "ae69057f2299fb9e5ba2df738607e6a505b74ab6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/d2g/dhcp4",
|
||||
"Rev": "a1d1b6c41b1ce8a71a5121a9cee31809c4707d9c"
|
||||
@ -1119,11 +1129,6 @@
|
||||
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
|
||||
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/symlink",
|
||||
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
|
||||
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/system",
|
||||
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
|
||||
@ -1475,218 +1480,218 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/accelerators",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/api",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/cache/memory",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/client/v2",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/collector",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/container",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/container/common",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/container/containerd",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/container/crio",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/container/docker",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/container/libcontainer",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/container/raw",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/container/rkt",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/container/systemd",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/devicemapper",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/events",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/fs",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/healthz",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/http",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/http/mux",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/info/v1",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/info/v2",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/machine",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/manager",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/manager/watcher",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/manager/watcher/raw",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/manager/watcher/rkt",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/metrics",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/pages",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/pages/static",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/storage",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/summary",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/utils",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/utils/cloudinfo",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/utils/cpuload",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/utils/docker",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/utils/oomparser",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/utils/sysfs",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/utils/sysinfo",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/validate",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/version",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/zfs",
|
||||
"Comment": "v0.28.3",
|
||||
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d"
|
||||
"Comment": "v0.24.0-alpha1-322-g13d955d",
|
||||
"Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/certificate-transparency/go",
|
||||
@ -1712,6 +1717,11 @@
|
||||
"ImportPath": "github.com/google/gofuzz",
|
||||
"Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/uuid",
|
||||
"Comment": "0.2-15-g8c31c18",
|
||||
"Rev": "8c31c18f31ede9fc8eae72290a7e7a8064e9b3e3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/googleapis/gnostic/OpenAPIv2",
|
||||
"Rev": "0c5108395e2debce0d731cf0287ddf7242066aba"
|
||||
@ -2339,78 +2349,83 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/rootless",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/intelrdt",
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/keys",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/mount",
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/system",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/user",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/utils",
|
||||
"Comment": "v1.0.0-rc4-50-g4d6e672",
|
||||
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
|
||||
"Comment": "v1.0.0-rc4-197-gd5b4a3e",
|
||||
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runtime-spec/specs-go",
|
||||
@ -2558,11 +2573,11 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/cobra",
|
||||
"Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57"
|
||||
"Rev": "19e54c4a2b8a78c9d54b2bed61b1a6c5e1bfcf6f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/cobra/doc",
|
||||
"Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57"
|
||||
"Rev": "19e54c4a2b8a78c9d54b2bed61b1a6c5e1bfcf6f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/jwalterweatherman",
|
||||
@ -2570,7 +2585,7 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/pflag",
|
||||
"Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
|
||||
"Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/viper",
|
||||
@ -2630,93 +2645,113 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/find",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/list",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/nfc",
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/object",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/pbm",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/pbm/methods",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/pbm/types",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/property",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/session",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/simulator",
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/simulator/esx",
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/simulator/vpx",
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/task",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/debug",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/methods",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/mo",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/progress",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/soap",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/types",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/xml",
|
||||
"Comment": "v0.14.0-11-gb8b228c",
|
||||
"Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1"
|
||||
"Comment": "v0.16.0-5-g5f0f400",
|
||||
"Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/photon-controller-go-sdk/SSPI",
|
||||
@@ -3212,35 +3247,35 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kube-openapi/pkg/aggregator",
|
||||
"Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1"
|
||||
"Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kube-openapi/pkg/builder",
|
||||
"Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1"
|
||||
"Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kube-openapi/pkg/common",
|
||||
"Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1"
|
||||
"Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kube-openapi/pkg/generators",
|
||||
"Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1"
|
||||
"Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kube-openapi/pkg/handler",
|
||||
"Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1"
|
||||
"Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kube-openapi/pkg/util",
|
||||
"Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1"
|
||||
"Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kube-openapi/pkg/util/proto",
|
||||
"Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1"
|
||||
"Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kube-openapi/pkg/util/proto/validation",
|
||||
"Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1"
|
||||
"Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/utils/clock",
|
||||
1916  Godeps/LICENSES  generated
File diff suppressed because it is too large
325  Vagrantfile  vendored
@@ -1,325 +0,0 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
|
||||
VAGRANTFILE_API_VERSION = "2"
|
||||
|
||||
# Require a recent version of vagrant otherwise some have reported errors setting host names on boxes
|
||||
Vagrant.require_version ">= 1.7.4"
|
||||
|
||||
if ARGV.first == "up" && ENV['USING_KUBE_SCRIPTS'] != 'true'
|
||||
raise Vagrant::Errors::VagrantError.new, <<END
|
||||
Calling 'vagrant up' directly is not supported. Instead, please run the following:
|
||||
|
||||
export KUBERNETES_PROVIDER=vagrant
|
||||
export VAGRANT_DEFAULT_PROVIDER=providername
|
||||
./cluster/kube-up.sh
|
||||
END
|
||||
end
|
||||
|
||||
# The number of nodes to provision
|
||||
$num_node = (ENV['NUM_NODES'] || 1).to_i
|
||||
|
||||
# ip configuration
|
||||
$master_ip = ENV['MASTER_IP']
|
||||
$node_ip_base = ENV['NODE_IP_BASE'] || ""
|
||||
$node_ips = $num_node.times.collect { |n| $node_ip_base + "#{n+3}" }
|
||||
|
||||
# Determine the OS platform to use
|
||||
$kube_os = ENV['KUBERNETES_OS'] || "fedora"
|
||||
|
||||
# Determine whether vagrant should use nfs to sync folders
|
||||
$use_nfs = ENV['KUBERNETES_VAGRANT_USE_NFS'] == 'true'
|
||||
# Determine whether vagrant should use rsync to sync folders
|
||||
$use_rsync = ENV['KUBERNETES_VAGRANT_USE_RSYNC'] == 'true'
|
||||
|
||||
# To override the vagrant provider, use (e.g.):
|
||||
# KUBERNETES_PROVIDER=vagrant VAGRANT_DEFAULT_PROVIDER=... .../cluster/kube-up.sh
|
||||
# To override the box, use (e.g.):
|
||||
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... .../cluster/kube-up.sh
|
||||
# You can specify a box version:
|
||||
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... KUBERNETES_BOX_VERSION=... .../cluster/kube-up.sh
|
||||
# You can specify a box location:
|
||||
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... KUBERNETES_BOX_URL=... .../cluster/kube-up.sh
|
||||
# KUBERNETES_BOX_URL and KUBERNETES_BOX_VERSION will be ignored unless
|
||||
# KUBERNETES_BOX_NAME is set
|
||||
|
||||
# Default OS platform to provider/box information
|
||||
$kube_provider_boxes = {
|
||||
:parallels => {
|
||||
'fedora' => {
|
||||
# :box_url and :box_version are optional (and mutually exclusive);
|
||||
# if :box_url is omitted the box will be retrieved by :box_name (and
|
||||
# :box_version if provided) from
|
||||
# http://atlas.hashicorp.com/boxes/search (formerly
|
||||
# http://vagrantcloud.com/); this allows you override :box_name with
|
||||
# your own value so long as you provide :box_url; for example, the
|
||||
# "official" name of this box is "rickard-von-essen/
|
||||
# opscode_fedora-20", but by providing the URL and our own name, we
|
||||
# make it appear as yet another provider under the "kube-fedora22"
|
||||
# box
|
||||
:box_name => 'kube-fedora23',
|
||||
:box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/parallels/opscode_fedora-23_chef-provisionerless.box'
|
||||
}
|
||||
},
|
||||
:virtualbox => {
|
||||
'fedora' => {
|
||||
:box_name => 'kube-fedora23',
|
||||
:box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-23_chef-provisionerless.box'
|
||||
}
|
||||
},
|
||||
:libvirt => {
|
||||
'fedora' => {
|
||||
:box_name => 'kube-fedora23',
|
||||
:box_url => 'https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-23-20151030.x86_64.vagrant-libvirt.box'
|
||||
}
|
||||
},
|
||||
:vmware_desktop => {
|
||||
'fedora' => {
|
||||
:box_name => 'kube-fedora23',
|
||||
:box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/vmware/opscode_fedora-23_chef-provisionerless.box'
|
||||
}
|
||||
},
|
||||
:vsphere => {
|
||||
'fedora' => {
|
||||
:box_name => 'vsphere-dummy',
|
||||
:box_url => 'https://github.com/deromka/vagrant-vsphere/blob/master/vsphere-dummy.box?raw=true'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Give access to all physical cpu cores
|
||||
# Previously cargo-culted from here:
|
||||
# http://www.stefanwrobel.com/how-to-make-vagrant-performance-not-suck
|
||||
# Rewritten to actually determine the number of hardware cores instead of assuming
|
||||
# that the host has hyperthreading enabled.
|
||||
host = RbConfig::CONFIG['host_os']
|
||||
if host =~ /darwin/
|
||||
$vm_cpus = `sysctl -n hw.physicalcpu`.to_i
|
||||
elsif host =~ /linux/
|
||||
#This should work on most processors, however it will fail on ones without the core id field.
|
||||
#So far i have only seen this on a raspberry pi. which you probably don't want to run vagrant on anyhow...
|
||||
#But just in case we'll default to the result of nproc if we get 0 just to be safe.
|
||||
$vm_cpus = `cat /proc/cpuinfo | grep 'core id' | sort -u | wc -l`.to_i
|
||||
if $vm_cpus < 1
|
||||
$vm_cpus = `nproc`.to_i
|
||||
end
|
||||
else # sorry Windows folks, I can't help you
|
||||
$vm_cpus = 2
|
||||
end
|
||||
|
||||
# Give VM 1024MB of RAM by default
|
||||
# In Fedora VM, tmpfs device is mapped to /tmp. tmpfs is given 50% of RAM allocation.
|
||||
# When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens.
|
||||
# This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.)
|
||||
$vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i
|
||||
$vm_node_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 2048).to_i
|
||||
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
if Vagrant.has_plugin?("vagrant-proxyconf")
|
||||
$http_proxy = ENV['KUBERNETES_HTTP_PROXY'] || ""
|
||||
$https_proxy = ENV['KUBERNETES_HTTPS_PROXY'] || ""
|
||||
$no_proxy = ENV['KUBERNETES_NO_PROXY'] || "127.0.0.1"
|
||||
config.proxy.http = $http_proxy
|
||||
config.proxy.https = $https_proxy
|
||||
config.proxy.no_proxy = $no_proxy
|
||||
end
|
||||
|
||||
# this corrects a bug in 1.8.5 where an invalid SSH key is inserted.
|
||||
if Vagrant::VERSION == "1.8.5"
|
||||
config.ssh.insert_key = false
|
||||
end
|
||||
|
||||
def setvmboxandurl(config, provider)
|
||||
if ENV['KUBERNETES_BOX_NAME'] then
|
||||
config.vm.box = ENV['KUBERNETES_BOX_NAME']
|
||||
|
||||
if ENV['KUBERNETES_BOX_URL'] then
|
||||
config.vm.box_url = ENV['KUBERNETES_BOX_URL']
|
||||
end
|
||||
|
||||
if ENV['KUBERNETES_BOX_VERSION'] then
|
||||
config.vm.box_version = ENV['KUBERNETES_BOX_VERSION']
|
||||
end
|
||||
else
|
||||
config.vm.box = $kube_provider_boxes[provider][$kube_os][:box_name]
|
||||
|
||||
if $kube_provider_boxes[provider][$kube_os][:box_url] then
|
||||
config.vm.box_url = $kube_provider_boxes[provider][$kube_os][:box_url]
|
||||
end
|
||||
|
||||
if $kube_provider_boxes[provider][$kube_os][:box_version] then
|
||||
config.vm.box_version = $kube_provider_boxes[provider][$kube_os][:box_version]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def customize_vm(config, vm_mem)
|
||||
|
||||
if $use_nfs then
|
||||
config.vm.synced_folder ".", "/vagrant", nfs: true
|
||||
elsif $use_rsync then
|
||||
opts = {}
|
||||
if ENV['KUBERNETES_VAGRANT_RSYNC_ARGS'] then
|
||||
opts[:rsync__args] = ENV['KUBERNETES_VAGRANT_RSYNC_ARGS'].split(" ")
|
||||
end
|
||||
if ENV['KUBERNETES_VAGRANT_RSYNC_EXCLUDE'] then
|
||||
opts[:rsync__exclude] = ENV['KUBERNETES_VAGRANT_RSYNC_EXCLUDE'].split(" ")
|
||||
end
|
||||
config.vm.synced_folder ".", "/vagrant", opts
|
||||
end
|
||||
|
||||
# Try VMWare Fusion first (see
|
||||
# https://docs.vagrantup.com/v2/providers/basic_usage.html)
|
||||
config.vm.provider :vmware_fusion do |v, override|
|
||||
setvmboxandurl(override, :vmware_desktop)
|
||||
v.vmx['memsize'] = vm_mem
|
||||
v.vmx['numvcpus'] = $vm_cpus
|
||||
end
|
||||
|
||||
# configure libvirt provider
|
||||
config.vm.provider :libvirt do |v, override|
|
||||
setvmboxandurl(override, :libvirt)
|
||||
v.memory = vm_mem
|
||||
v.cpus = $vm_cpus
|
||||
v.nested = true
|
||||
v.volume_cache = 'none'
|
||||
end
|
||||
|
||||
# Then try VMWare Workstation
|
||||
config.vm.provider :vmware_workstation do |v, override|
|
||||
setvmboxandurl(override, :vmware_desktop)
|
||||
v.vmx['memsize'] = vm_mem
|
||||
v.vmx['numvcpus'] = $vm_cpus
|
||||
end
|
||||
|
||||
# Then try Parallels
|
||||
config.vm.provider :parallels do |v, override|
|
||||
setvmboxandurl(override, :parallels)
|
||||
v.memory = vm_mem # v.customize ['set', :id, '--memsize', vm_mem]
|
||||
v.cpus = $vm_cpus # v.customize ['set', :id, '--cpus', $vm_cpus]
|
||||
|
||||
# Don't attempt to update the Parallels tools on the image (this can
|
||||
# be done manually if necessary)
|
||||
v.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off']
|
||||
|
||||
# Set up Parallels folder sharing to behave like VirtualBox (i.e.,
|
||||
# mount the current directory as /vagrant and that's it)
|
||||
v.customize ['set', :id, '--shf-guest', 'off']
|
||||
v.customize ['set', :id, '--shf-guest-automount', 'off']
|
||||
v.customize ['set', :id, '--shf-host', 'on']
|
||||
|
||||
# Synchronize VM clocks to host clock (Avoid certificate invalid issue)
|
||||
v.customize ['set', :id, '--time-sync', 'on']
|
||||
|
||||
# Remove all auto-mounted "shared folders"; the result seems to
|
||||
# persist between runs (i.e., vagrant halt && vagrant up)
|
||||
override.vm.provision :shell, :inline => (%q{
|
||||
set -ex
|
||||
if [ -d /media/psf ]; then
|
||||
for i in /media/psf/*; do
|
||||
if [ -d "${i}" ]; then
|
||||
umount "${i}" || true
|
||||
rmdir -v "${i}"
|
||||
fi
|
||||
done
|
||||
rmdir -v /media/psf
|
||||
fi
|
||||
exit
|
||||
}).strip
|
||||
end
|
||||
|
||||
# Then try vsphere
|
||||
config.vm.provider :vsphere do |vsphere, override|
|
||||
setvmboxandurl(override, :vsphere)
|
||||
|
||||
#config.vm.hostname = ENV['MASTER_NAME']
|
||||
|
||||
config.ssh.username = ENV['MASTER_USER']
|
||||
config.ssh.password = ENV['MASTER_PASSWD']
|
||||
|
||||
config.ssh.pty = true
|
||||
config.ssh.insert_key = true
|
||||
#config.ssh.private_key_path = '~/.ssh/id_rsa_vsphere'
|
||||
|
||||
# Don't attempt to update the tools on the image (this can
|
||||
# be done manually if necessary)
|
||||
# vsphere.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off']
|
||||
|
||||
# The vSphere host we're going to connect to
|
||||
vsphere.host = ENV['VAGRANT_VSPHERE_URL']
|
||||
|
||||
# The ESX host for the new VM
|
||||
vsphere.compute_resource_name = ENV['VAGRANT_VSPHERE_RESOURCE_POOL']
|
||||
|
||||
# The resource pool for the new VM
|
||||
#vsphere.resource_pool_name = 'Comp'
|
||||
|
||||
# path to folder where new VM should be created, if not specified template's parent folder will be used
|
||||
vsphere.vm_base_path = ENV['VAGRANT_VSPHERE_BASE_PATH']
|
||||
|
||||
# The template we're going to clone
|
||||
vsphere.template_name = ENV['VAGRANT_VSPHERE_TEMPLATE_NAME']
|
||||
|
||||
# The name of the new machine
|
||||
#vsphere.name = ENV['MASTER_NAME']
|
||||
|
||||
# vSphere login
|
||||
vsphere.user = ENV['VAGRANT_VSPHERE_USERNAME']
|
||||
|
||||
# vSphere password
|
||||
vsphere.password = ENV['VAGRANT_VSPHERE_PASSWORD']
|
||||
|
||||
# cpu count
|
||||
vsphere.cpu_count = $vm_cpus
|
||||
|
||||
# memory in MB
|
||||
vsphere.memory_mb = vm_mem
|
||||
|
||||
# If you don't have SSL configured correctly, set this to 'true'
|
||||
vsphere.insecure = ENV['VAGRANT_VSPHERE_INSECURE']
|
||||
end
|
||||
|
||||
|
||||
# Don't attempt to update Virtualbox Guest Additions (requires gcc)
|
||||
if Vagrant.has_plugin?("vagrant-vbguest") then
|
||||
config.vbguest.auto_update = false
|
||||
end
|
||||
# Finally, fall back to VirtualBox
|
||||
config.vm.provider :virtualbox do |v, override|
|
||||
setvmboxandurl(override, :virtualbox)
|
||||
v.memory = vm_mem # v.customize ["modifyvm", :id, "--memory", vm_mem]
|
||||
v.cpus = $vm_cpus # v.customize ["modifyvm", :id, "--cpus", $vm_cpus]
|
||||
|
||||
# Use faster paravirtualized networking
|
||||
v.customize ["modifyvm", :id, "--nictype1", "virtio"]
|
||||
v.customize ["modifyvm", :id, "--nictype2", "virtio"]
|
||||
end
|
||||
end
|
||||
|
||||
# Kubernetes master
|
||||
config.vm.define "master" do |c|
|
||||
customize_vm c, $vm_master_mem
|
||||
if ENV['KUBE_TEMP'] then
|
||||
script = "#{ENV['KUBE_TEMP']}/master-start.sh"
|
||||
c.vm.provision "shell", run: "always", path: script
|
||||
end
|
||||
c.vm.network "private_network", ip: "#{$master_ip}"
|
||||
end
|
||||
|
||||
# Kubernetes node
|
||||
$num_node.times do |n|
|
||||
node_vm_name = "node-#{n+1}"
|
||||
|
||||
config.vm.define node_vm_name do |node|
|
||||
customize_vm node, $vm_node_mem
|
||||
|
||||
node_ip = $node_ips[n]
|
||||
if ENV['KUBE_TEMP'] then
|
||||
script = "#{ENV['KUBE_TEMP']}/node-start-#{n}.sh"
|
||||
node.vm.provision "shell", run: "always", path: script
|
||||
end
|
||||
node.vm.network "private_network", ip: "#{node_ip}"
|
||||
end
|
||||
end
|
||||
end
|
10  api/openapi-spec/swagger.json  generated
@@ -74526,6 +74526,10 @@
"description": "Driver is the name of the driver to use for this volume. Required.",
"type": "string"
},
"fsType": {
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
"type": "string"
},
"readOnly": {
"description": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
"type": "boolean"
@@ -77568,7 +77572,7 @@
"type": "string"
},
"qosClass": {
"description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md",
"description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md",
"type": "string"
},
"reason": {
@@ -82131,7 +82135,7 @@
"type": "string"
},
"metadata": {
"description": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
"description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
},
"value": {
@@ -82170,7 +82174,7 @@
"type": "string"
},
"metadata": {
"description": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
"description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
}
},
2  api/swagger-spec/apps_v1alpha1.json  generated
@@ -1311,7 +1311,7 @@
},
"serviceAccountName": {
"type": "string",
"description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md"
"description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://git.k8s.io/community/contributors/design-proposals/auth/service_accounts.md"
},
"serviceAccount": {
"type": "string",
4  api/swagger-spec/scheduling.k8s.io_v1alpha1.json  generated
@@ -744,7 +744,7 @@
},
"metadata": {
"$ref": "v1.ListMeta",
"description": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata"
"description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
},
"items": {
"type": "array",
@@ -790,7 +790,7 @@
},
"metadata": {
"$ref": "v1.ObjectMeta",
"description": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata"
"description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
},
"value": {
"type": "integer",
6  api/swagger-spec/v1.json  generated
@@ -21320,6 +21320,10 @@
"readOnly": {
"type": "boolean",
"description": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."
},
"fsType": {
"type": "string",
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
}
}
},
@@ -23155,7 +23159,7 @@
},
"qosClass": {
"type": "string",
"description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md"
"description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md"
}
}
},
@@ -100,12 +100,10 @@ The main output is a tar file: `kubernetes.tar.gz`. This includes:
* Examples
* Cluster deployment scripts for various clouds
* Tar file containing all server binaries
* Tar file containing salt deployment tree shared across multiple cloud deployments.

In addition, there are some other tar files that are created:
* `kubernetes-client-*.tar.gz` Client binaries for a specific platform.
* `kubernetes-server-*.tar.gz` Server binaries for a specific platform.
* `kubernetes-salt.tar.gz` The salt script/tree shared across multiple deployment scripts.

When building final release tars, they are first staged into `_output/release-stage` before being tar'd up and put into `_output/release-tars`.
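As a quick sanity check after a build, the staged tarballs can be listed directly; this is only a sketch and assumes a completed release build using the default `_output` layout described above.

    # List the release tarballs produced by the build.
    ls -lh _output/release-tars/
    # Peek at the top-level layout of the main tarball without extracting it.
    tar -tzf _output/release-tars/kubernetes.tar.gz | head -n 20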
@@ -451,8 +451,8 @@ function kube::build::build_image() {

cp /etc/localtime "${LOCAL_OUTPUT_BUILD_CONTEXT}/"

cp build/build-image/Dockerfile "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
cp build/build-image/rsyncd.sh "${LOCAL_OUTPUT_BUILD_CONTEXT}/"
cp ${KUBE_ROOT}/build/build-image/Dockerfile "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
cp ${KUBE_ROOT}/build/build-image/rsyncd.sh "${LOCAL_OUTPUT_BUILD_CONTEXT}/"
dd if=/dev/urandom bs=512 count=1 2>/dev/null | LC_ALL=C tr -dc 'A-Za-z0-9' | dd bs=32 count=1 2>/dev/null > "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password"
chmod go= "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password"
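The `rsyncd.password` line above packs the secret generation into a single pipeline; as a standalone sketch (OUT is a hypothetical output path, not a name used by the build):

    # Generate a 32-character alphanumeric secret from /dev/urandom and make it
    # readable only by the owner, as the build-image setup does.
    OUT=rsyncd.password
    dd if=/dev/urandom bs=512 count=1 2>/dev/null | LC_ALL=C tr -dc 'A-Za-z0-9' | dd bs=32 count=1 2>/dev/null > "${OUT}"
    chmod go= "${OUT}"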
@@ -1,8 +1,8 @@
package(default_visibility = ["//visibility:public"])

load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:deb.bzl", "k8s_deb", "deb_data")
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")

# We do not include kube-scheduler, kube-controller-manager,
# kube-apiserver, and kube-proxy in this list even though we
@@ -78,7 +78,6 @@ function kube::release::package_tarballs() {
mkdir -p "${RELEASE_TARS}"
kube::release::package_src_tarball &
kube::release::package_client_tarballs &
kube::release::package_salt_tarball &
kube::release::package_kube_manifests_tarball &
kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; }
@@ -359,71 +358,39 @@ function kube::release::create_docker_images_for_server() {

}

# Package up the salt configuration tree. This is an optional helper to getting
# a cluster up and running.
function kube::release::package_salt_tarball() {
kube::log::status "Building tarball: salt"

local release_stage="${RELEASE_STAGE}/salt/kubernetes"
rm -rf "${release_stage}"
mkdir -p "${release_stage}"

cp -R "${KUBE_ROOT}/cluster/saltbase" "${release_stage}/"

# TODO(#3579): This is a temporary hack. It gathers up the yaml,
# yaml.in, json files in cluster/addons (minus any demos) and overlays
# them into kube-addons, where we expect them. (This pipeline is a
# fancy copy, stripping anything but the files we don't want.)
local objects
objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo)
tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${release_stage}/saltbase/salt/kube-addons"

kube::release::clean_cruft

local package_name="${RELEASE_TARS}/kubernetes-salt.tar.gz"
kube::release::create_tarball "${package_name}" "${release_stage}/.."
}

# This will pack kube-system manifests files for distros without using salt
# such as GCI and Ubuntu Trusty. We directly copy manifests from
# cluster/addons and cluster/saltbase/salt. The script of cluster initialization
# will remove the salt configuration and evaluate the variables in the manifests.
# This will pack kube-system manifests files for distros such as COS.
function kube::release::package_kube_manifests_tarball() {
kube::log::status "Building tarball: manifests"

local salt_dir="${KUBE_ROOT}/cluster/saltbase/salt"
local src_dir="${KUBE_ROOT}/cluster/gce/manifests"

local release_stage="${RELEASE_STAGE}/manifests/kubernetes"
rm -rf "${release_stage}"

mkdir -p "${release_stage}"
cp "${salt_dir}/kube-registry-proxy/kube-registry-proxy.yaml" "${release_stage}/"
cp "${salt_dir}/kube-proxy/kube-proxy.manifest" "${release_stage}/"

local gci_dst_dir="${release_stage}/gci-trusty"
mkdir -p "${gci_dst_dir}"
cp "${salt_dir}/cluster-autoscaler/cluster-autoscaler.manifest" "${gci_dst_dir}/"
cp "${salt_dir}/etcd/etcd.manifest" "${gci_dst_dir}"
cp "${salt_dir}/kube-scheduler/kube-scheduler.manifest" "${gci_dst_dir}"
cp "${salt_dir}/kube-apiserver/kube-apiserver.manifest" "${gci_dst_dir}"
cp "${salt_dir}/kube-apiserver/abac-authz-policy.jsonl" "${gci_dst_dir}"
cp "${salt_dir}/kube-controller-manager/kube-controller-manager.manifest" "${gci_dst_dir}"
cp "${salt_dir}/kube-addons/kube-addon-manager.yaml" "${gci_dst_dir}"
cp "${salt_dir}/l7-gcp/glbc.manifest" "${gci_dst_dir}"
cp "${salt_dir}/rescheduler/rescheduler.manifest" "${gci_dst_dir}/"
cp "${salt_dir}/e2e-image-puller/e2e-image-puller.manifest" "${gci_dst_dir}/"
cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${gci_dst_dir}/gci-configure-helper.sh"
cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${gci_dst_dir}/health-monitor.sh"
cp "${KUBE_ROOT}/cluster/gce/container-linux/configure-helper.sh" "${gci_dst_dir}/container-linux-configure-helper.sh"
cp -r "${salt_dir}/kube-admission-controls/limit-range" "${gci_dst_dir}"
local dst_dir="${release_stage}/gci-trusty"
mkdir -p "${dst_dir}"
cp "${src_dir}/kube-registry-proxy.yaml" "${dst_dir}/"
cp "${src_dir}/kube-proxy.manifest" "${dst_dir}/"
cp "${src_dir}/cluster-autoscaler.manifest" "${dst_dir}/"
cp "${src_dir}/etcd.manifest" "${dst_dir}"
cp "${src_dir}/kube-scheduler.manifest" "${dst_dir}"
cp "${src_dir}/kube-apiserver.manifest" "${dst_dir}"
cp "${src_dir}/abac-authz-policy.jsonl" "${dst_dir}"
cp "${src_dir}/kube-controller-manager.manifest" "${dst_dir}"
cp "${src_dir}/kube-addon-manager.yaml" "${dst_dir}"
cp "${src_dir}/glbc.manifest" "${dst_dir}"
cp "${src_dir}/rescheduler.manifest" "${dst_dir}/"
cp "${src_dir}/e2e-image-puller.manifest" "${dst_dir}/"
cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh"
cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh"
local objects
objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo)
tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${gci_dst_dir}"
tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${dst_dir}"
# Merge GCE-specific addons with general purpose addons.
local gce_objects
gce_objects=$(cd "${KUBE_ROOT}/cluster/gce/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) \( -not -name \*demo\* \))
if [[ -n "${gce_objects}" ]]; then
tar c -C "${KUBE_ROOT}/cluster/gce/addons" ${gce_objects} | tar x -C "${gci_dst_dir}"
tar c -C "${KUBE_ROOT}/cluster/gce/addons" ${gce_objects} | tar x -C "${dst_dir}"
fi

kube::release::clean_cruft
@@ -472,8 +439,7 @@ function kube::release::package_test_tarball() {
# using the bundled cluster/get-kube-binaries.sh script).
# Included in this tarball:
# - Cluster spin up/down scripts and configs for various cloud providers
# - Tarballs for salt configs that are ready to be uploaded
# to master by whatever means appropriate.
# - Tarballs for manifest configs that are ready to be uploaded
# - Examples (which may or may not still work)
# - The remnants of the docs/ directory
function kube::release::package_final_tarball() {
@@ -492,13 +458,10 @@ Client binaries are no longer included in the Kubernetes final tarball.
Run cluster/get-kube-binaries.sh to download client and server binaries.
EOF

# We want everything in /cluster except saltbase. That is only needed on the
# server.
# We want everything in /cluster.
cp -R "${KUBE_ROOT}/cluster" "${release_stage}/"
rm -rf "${release_stage}/cluster/saltbase"

mkdir -p "${release_stage}/server"
cp "${RELEASE_TARS}/kubernetes-salt.tar.gz" "${release_stage}/server/"
cp "${RELEASE_TARS}/kubernetes-manifests.tar.gz" "${release_stage}/server/"
cat <<EOF > "${release_stage}/server/README"
Server binary tarballs are no longer included in the Kubernetes final tarball.
@@ -517,7 +480,6 @@ EOF
cp -R "${KUBE_ROOT}/docs" "${release_stage}/"
cp "${KUBE_ROOT}/README.md" "${release_stage}/"
cp "${KUBE_ROOT}/Godeps/LICENSES" "${release_stage}/"
cp "${KUBE_ROOT}/Vagrantfile" "${release_stage}/"

echo "${KUBE_GIT_VERSION}" > "${release_stage}/version"
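The README text written above points users at `cluster/get-kube-binaries.sh`; a minimal sketch of that flow, assuming a downloaded `kubernetes.tar.gz` in the current directory:

    # Unpack the slim release tarball and fetch the client/server binaries
    # that are no longer bundled with it.
    tar -xzf kubernetes.tar.gz
    cd kubernetes
    ./cluster/get-kube-binaries.sh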
@@ -1,7 +1,7 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
|
||||
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
|
||||
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
@ -38,12 +38,11 @@ grep ^STABLE_BUILD_GIT_COMMIT bazel-out/stable-status.txt | cut -d' ' -f2 >>$@
|
||||
|
||||
pkg_tar(
|
||||
name = "kubernetes-src",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
extension = "tar.gz",
|
||||
files = select({
|
||||
srcs = select({
|
||||
":package_src": ["//:all-srcs"],
|
||||
"//conditions:default": ["README-src.txt"],
|
||||
}),
|
||||
extension = "tar.gz",
|
||||
package_dir = "kubernetes",
|
||||
strip_prefix = select({
|
||||
":package_src": "//",
|
||||
@ -66,8 +65,7 @@ filegroup(
|
||||
|
||||
pkg_tar(
|
||||
name = "_client-bin",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
files = ["//build:client-targets"],
|
||||
srcs = ["//build:client-targets"],
|
||||
mode = "0755",
|
||||
package_dir = "client/bin",
|
||||
visibility = ["//visibility:private"],
|
||||
@ -75,7 +73,6 @@ pkg_tar(
|
||||
|
||||
pkg_tar(
|
||||
name = "kubernetes-client-%s" % PLATFORM_ARCH_STRING,
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
extension = "tar.gz",
|
||||
package_dir = "kubernetes",
|
||||
deps = [
|
||||
@ -85,8 +82,7 @@ pkg_tar(
|
||||
|
||||
pkg_tar(
|
||||
name = "_node-bin",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
files = [
|
||||
srcs = [
|
||||
"//build:client-targets",
|
||||
"//build:node-targets",
|
||||
],
|
||||
@ -97,9 +93,8 @@ pkg_tar(
|
||||
|
||||
pkg_tar(
|
||||
name = "kubernetes-node-%s" % PLATFORM_ARCH_STRING,
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
srcs = [":license-targets"],
|
||||
extension = "tar.gz",
|
||||
files = [":license-targets"],
|
||||
mode = "0644",
|
||||
package_dir = "kubernetes",
|
||||
deps = [
|
||||
@ -109,8 +104,7 @@ pkg_tar(
|
||||
|
||||
pkg_tar(
|
||||
name = "_server-bin",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
files = [
|
||||
srcs = [
|
||||
"//build:client-targets",
|
||||
"//build:docker-artifacts",
|
||||
"//build:node-targets",
|
||||
@ -131,8 +125,7 @@ genrule(
|
||||
# Some of the startup scripts fail if there isn't an addons/ directory in the server tarball.
|
||||
pkg_tar(
|
||||
name = "_server-addons",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
files = [
|
||||
srcs = [
|
||||
":.dummy",
|
||||
],
|
||||
package_dir = "addons",
|
||||
@ -141,9 +134,8 @@ pkg_tar(
|
||||
|
||||
pkg_tar(
|
||||
name = "kubernetes-server-%s" % PLATFORM_ARCH_STRING,
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
srcs = [":license-targets"],
|
||||
extension = "tar.gz",
|
||||
files = [":license-targets"],
|
||||
mode = "0644",
|
||||
package_dir = "kubernetes",
|
||||
deps = [
|
||||
@ -154,8 +146,7 @@ pkg_tar(
|
||||
|
||||
pkg_tar(
|
||||
name = "_test-bin",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
files = ["//build:test-targets"],
|
||||
srcs = ["//build:test-targets"],
|
||||
mode = "0755",
|
||||
package_dir = "platforms/" + PLATFORM_ARCH_STRING.replace("-", "/"),
|
||||
# TODO: how to make this multiplatform?
|
||||
@ -164,9 +155,8 @@ pkg_tar(
|
||||
|
||||
pkg_tar(
|
||||
name = "kubernetes-test",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
srcs = ["//build:test-portable-targets"],
|
||||
extension = "tar.gz",
|
||||
files = ["//build:test-portable-targets"],
|
||||
package_dir = "kubernetes",
|
||||
strip_prefix = "//",
|
||||
deps = [
|
||||
@ -177,10 +167,8 @@ pkg_tar(
|
||||
|
||||
pkg_tar(
|
||||
name = "_full_server",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
files = [
|
||||
srcs = [
|
||||
":kubernetes-manifests.tar.gz",
|
||||
":kubernetes-salt.tar.gz",
|
||||
],
|
||||
package_dir = "server",
|
||||
visibility = ["//visibility:private"],
|
||||
@ -188,12 +176,9 @@ pkg_tar(
|
||||
|
||||
pkg_tar(
|
||||
name = "kubernetes",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
extension = "tar.gz",
|
||||
files = [
|
||||
srcs = [
|
||||
"//:Godeps/LICENSES",
|
||||
"//:README.md",
|
||||
"//:Vagrantfile",
|
||||
"//:version",
|
||||
"//cluster:all-srcs",
|
||||
"//docs:all-srcs",
|
||||
@ -201,6 +186,7 @@ pkg_tar(
|
||||
"//hack/lib:all-srcs",
|
||||
"//third_party/htpasswd:all-srcs",
|
||||
],
|
||||
extension = "tar.gz",
|
||||
package_dir = "kubernetes",
|
||||
strip_prefix = "//",
|
||||
deps = [
|
||||
@ -210,22 +196,12 @@ pkg_tar(
|
||||
|
||||
pkg_tar(
|
||||
name = "kubernetes-manifests",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
extension = "tar.gz",
|
||||
deps = [
|
||||
"//cluster:manifests",
|
||||
],
|
||||
)
|
||||
|
||||
pkg_tar(
|
||||
name = "kubernetes-salt",
|
||||
build_tar = "@io_kubernetes_build//tools/build_tar",
|
||||
extension = "tar.gz",
|
||||
deps = [
|
||||
"//cluster/saltbase:salt",
|
||||
],
|
||||
)
|
||||
|
||||
release_filegroup(
|
||||
name = "release-tars",
|
||||
srcs = [
|
||||
@ -234,7 +210,6 @@ release_filegroup(
|
||||
":kubernetes-node-%s.tar.gz" % PLATFORM_ARCH_STRING,
|
||||
":kubernetes-server-%s.tar.gz" % PLATFORM_ARCH_STRING,
|
||||
":kubernetes-manifests.tar.gz",
|
||||
":kubernetes-salt.tar.gz",
|
||||
":kubernetes-src.tar.gz",
|
||||
":kubernetes-test.tar.gz",
|
||||
],
|
||||
|
@@ -1,15 +1,22 @@
http_archive(
name = "io_bazel_rules_go",
sha256 = "e8c7f1fda9ee482745a5b35e8314ac3ae744d4ba30f3e6de28148fd166044306",
strip_prefix = "rules_go-737df20c53499fd84b67f04c6ca9ccdee2e77089",
urls = ["https://github.com/bazelbuild/rules_go/archive/737df20c53499fd84b67f04c6ca9ccdee2e77089.tar.gz"],
sha256 = "0efdc3cca8ac1c29e1c837bee260dab537dfd373eb4c43c7d50246a142a7c098",
strip_prefix = "rules_go-74d8ad8f9f59a1d9a7cf066d0980f9e394acccd7",
urls = ["https://github.com/bazelbuild/rules_go/archive/74d8ad8f9f59a1d9a7cf066d0980f9e394acccd7.tar.gz"],
)

http_archive(
name = "io_kubernetes_build",
sha256 = "cf138e48871629345548b4aaf23101314b5621c1bdbe45c4e75edb45b08891f0",
strip_prefix = "repo-infra-1fb0a3ff0cc5308a6d8e2f3f9c57d1f2f940354e",
urls = ["https://github.com/kubernetes/repo-infra/archive/1fb0a3ff0cc5308a6d8e2f3f9c57d1f2f940354e.tar.gz"],
sha256 = "f4946917d95c54aaa98d1092757256e491f8f48fd550179134f00f902bc0b4ce",
strip_prefix = "repo-infra-c75960142a50de16ac6225b0843b1ff3476ab0b4",
urls = ["https://github.com/kubernetes/repo-infra/archive/c75960142a50de16ac6225b0843b1ff3476ab0b4.tar.gz"],
)

http_archive(
name = "bazel_skylib",
sha256 = "bbccf674aa441c266df9894182d80de104cabd19be98be002f6d478aaa31574d",
strip_prefix = "bazel-skylib-2169ae1c374aab4a09aa90e65efe1a3aad4e279b",
urls = ["https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"],
)

ETCD_VERSION = "3.1.10"
@@ -22,16 +29,6 @@ new_http_archive(
urls = ["https://github.com/coreos/etcd/releases/download/v%s/etcd-v%s-linux-amd64.tar.gz" % (ETCD_VERSION, ETCD_VERSION)],
)

# This contains a patch to not prepend ./ to tarfiles produced by pkg_tar.
# When merged upstream, we'll no longer need to use ixdy's fork:
# https://bazel-review.googlesource.com/#/c/10390/
http_archive(
name = "io_bazel",
sha256 = "892a84aa1e7c1f99fb57bb056cb648745c513077252815324579a012d263defb",
strip_prefix = "bazel-df2c687c22bdd7c76f3cdcc85f38fefd02f0b844",
urls = ["https://github.com/ixdy/bazel/archive/df2c687c22bdd7c76f3cdcc85f38fefd02f0b844.tar.gz"],
)

http_archive(
name = "io_bazel_rules_docker",
sha256 = "c440717ee9b1b2f4a1e9bf5622539feb5aef9db83fc1fa1517818f13c041b0be",
@@ -39,9 +36,9 @@ http_archive(
urls = ["https://github.com/bazelbuild/rules_docker/archive/8bbe2a8abd382641e65ff7127a3700a8530f02ce.tar.gz"],
)

load("@io_kubernetes_build//defs:bazel_version.bzl", "check_version")
load("@bazel_skylib//:lib.bzl", "versions")

check_version("0.8.0")
versions.check(minimum_bazel_version = "0.8.0")

load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains", "go_download_sdk")
load("@io_bazel_rules_docker//docker:docker.bzl", "docker_repositories", "docker_pull")
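Before bumping one of these pins it can help to confirm the new archive really matches the recorded digest; a sketch using the updated rules_go entry, assuming GitHub still serves a byte-identical archive for that commit:

    # Fetch the pinned rules_go archive and compare against the WORKSPACE sha256.
    URL=https://github.com/bazelbuild/rules_go/archive/74d8ad8f9f59a1d9a7cf066d0980f9e394acccd7.tar.gz
    curl -sSL "${URL}" | sha256sum
    # Expected: 0efdc3cca8ac1c29e1c837bee260dab537dfd373eb4c43c7d50246a142a7c098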
@@ -1,6 +1,6 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
|
||||
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
@ -20,33 +20,19 @@ filegroup(
|
||||
"//cluster/images/etcd/rollback:all-srcs",
|
||||
"//cluster/images/hyperkube:all-srcs",
|
||||
"//cluster/images/kubemark:all-srcs",
|
||||
"//cluster/lib:all-srcs",
|
||||
"//cluster/saltbase:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
||||
# All of the manifests that are expected to be in a "gci-trusty"
|
||||
# subdir of the manifests tarball.
|
||||
pkg_tar(
|
||||
name = "_manifests-gci-trusty",
|
||||
package_dir = "gci-trusty",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//cluster/addons",
|
||||
"//cluster/gce:gci-trusty-manifests",
|
||||
"//cluster/gce/addons",
|
||||
"//cluster/saltbase:gci-trusty-salt-manifests",
|
||||
],
|
||||
)
|
||||
|
||||
pkg_tar(
|
||||
name = "manifests",
|
||||
mode = "0644",
|
||||
package_dir = "kubernetes",
|
||||
package_dir = "kubernetes/gci-trusty",
|
||||
deps = [
|
||||
":_manifests-gci-trusty",
|
||||
"//cluster/saltbase:salt-manifests",
|
||||
"//cluster/addons",
|
||||
"//cluster/gce:gce-master-manifests",
|
||||
"//cluster/gce:gci-trusty-manifests",
|
||||
"//cluster/gce/addons",
|
||||
],
|
||||
)
|
||||
|
||||
@ -55,7 +41,6 @@ sh_test(
|
||||
name = "common_test",
|
||||
srcs = ["common.sh"],
|
||||
deps = [
|
||||
"//cluster/lib",
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
@ -64,7 +49,6 @@ sh_test(
|
||||
name = "clientbin_test",
|
||||
srcs = ["clientbin.sh"],
|
||||
deps = [
|
||||
"//cluster/lib",
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
@ -73,7 +57,6 @@ sh_test(
|
||||
name = "kube-util_test",
|
||||
srcs = ["kube-util.sh"],
|
||||
deps = [
|
||||
"//cluster/lib",
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
|
@@ -1,6 +1,6 @@
package(default_visibility = ["//visibility:public"])

load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")

filegroup(
name = "addon-srcs",
@@ -16,10 +16,10 @@ filegroup(

pkg_tar(
name = "addons",
extension = "tar.gz",
files = [
srcs = [
":addon-srcs",
],
extension = "tar.gz",
mode = "0644",
strip_prefix = ".",
)
@@ -15,7 +15,7 @@
IMAGE=gcr.io/google-containers/kube-addon-manager
ARCH?=amd64
TEMP_DIR:=$(shell mktemp -d)
VERSION=v8.4
VERSION=v8.5
KUBECTL_VERSION?=v1.8.4

ifeq ($(ARCH),amd64)
@@ -155,7 +155,7 @@ function is_leader() {
fi
KUBE_CONTROLLER_MANAGER_LEADER=`${KUBECTL} -n kube-system get ep kube-controller-manager \
-o go-template=$'{{index .metadata.annotations "control-plane.alpha.kubernetes.io/leader"}}' \
| sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/'`
| sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/' | awk -F'_' '{print $1}'`
# If there was any problem with getting the leader election results, var will
# be empty. Since it's better to have multiple addon managers than no addon
# managers at all, we're going to assume that we're the leader in such case.
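The new `awk` stage drops the `_<uid>` suffix that leader election appends to the holder identity, leaving a bare pod name; the same query can be run by hand as a sketch, assuming kubectl access to the cluster:

    # Read the current kube-controller-manager leader and strip the "_<uid>" suffix,
    # mirroring the is_leader() pipeline above.
    kubectl -n kube-system get ep kube-controller-manager \
      -o go-template='{{index .metadata.annotations "control-plane.alpha.kubernetes.io/leader"}}' \
      | sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/' | awk -F'_' '{print $1}'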
@@ -47,7 +47,7 @@ roleRef:
apiGroup: ""
---
# Elasticsearch deployment itself
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch-logging
@@ -129,17 +129,6 @@ data:
max_lines 1000
</match>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/es-salt.pos
tag salt
</source>

# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
@@ -45,27 +45,27 @@ roleRef:
name: fluentd-es
apiGroup: ""
---
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-es-v2.0.2
name: fluentd-es-v2.0.3
namespace: kube-system
labels:
k8s-app: fluentd-es
version: v2.0.2
version: v2.0.3
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: v2.0.2
version: v2.0.3
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: v2.0.2
version: v2.0.3
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
@@ -75,7 +75,7 @@ spec:
serviceAccountName: fluentd-es
containers:
- name: fluentd-es
image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.2
image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.3
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
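Once the DaemonSet name, labels, and image are all bumped to v2.0.3, the rollout can be confirmed with a quick check; a sketch assuming kubectl access to the cluster:

    # Confirm the fluentd-es DaemonSet is running the v2.0.3 image and is fully ready.
    kubectl -n kube-system get ds fluentd-es-v2.0.3 \
      -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
    kubectl -n kube-system get ds fluentd-es-v2.0.3 \
      -o jsonpath='{.status.desiredNumberScheduled} desired / {.status.numberReady} ready{"\n"}'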
@@ -1,4 +1,4 @@
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
@@ -1,13 +1,13 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd-gcp-v2.0.13
name: fluentd-gcp-v2.0.14
namespace: kube-system
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v2.0.13
version: v2.0.14
spec:
updateStrategy:
type: RollingUpdate
@@ -16,7 +16,7 @@ spec:
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
version: v2.0.13
version: v2.0.14
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
@@ -27,7 +27,7 @@ spec:
dnsPolicy: Default
containers:
- name: fluentd-gcp
image: gcr.io/google-containers/fluentd-gcp:2.0.13
image: gcr.io/google-containers/fluentd-gcp:2.0.14
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
@@ -23,31 +23,31 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: metrics-server-v0.2.0
name: metrics-server-v0.2.1
namespace: kube-system
labels:
k8s-app: metrics-server
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v0.2.0
version: v0.2.1
spec:
selector:
matchLabels:
k8s-app: metrics-server
version: v0.2.0
version: v0.2.1
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
version: v0.2.0
version: v0.2.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: metrics-server
containers:
- name: metrics-server
image: gcr.io/google_containers/metrics-server-amd64:v0.2.0
image: gcr.io/google_containers/metrics-server-amd64:v0.2.1
command:
- /metrics-server
- --source=kubernetes.summary_api:''
@@ -81,10 +81,10 @@ spec:
- --config-dir=/etc/config
- --cpu=40m
- --extra-cpu=0.5m
- --memory=140Mi
- --memory=40Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=metrics-server-v0.2.0
- --deployment=metrics-server-v0.2.1
- --container=metrics-server
- --poll-period=300000
- --estimator=exponential
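For scale, the resizer request under the new flags works out to a base plus a per-node increment: assuming `--extra-memory` stays at 4Mi, a 100-node cluster gives roughly 40Mi + 100 x 4Mi = 440Mi of memory for metrics-server, versus 140Mi + 100 x 4Mi = 540Mi with the old base. The node count here is only an illustration of how `--memory` and `--extra-memory` combine.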
2  cluster/centos/OWNERS  Normal file
@@ -0,0 +1,2 @@
reviewers:
- zouyee
@@ -33,29 +33,6 @@ mkdir -p "$cert_dir"

use_cn=false

# TODO: Add support for discovery on other providers?
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi

if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
# If there's no public IP assigned (e.g. this host is running on an internal subnet in a VPC), then
# curl will happily spit out the contents of AWS's 404 page and an exit code of zero.
#
# The string containing the 404 page trips up one of easyrsa's calls to openssl later; whichever
# one creates the CA certificate, because the 404 page is > 64 characters.
if cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/public-ipv4); then
:
else
cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/local-ipv4)
fi
fi

if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
use_cn=true
fi

sans="IP:${cert_ip}"
if [[ -n "${extra_sans}" ]]; then
sans="${sans},${extra_sans}"
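The removed block resolved the certificate IP from cloud metadata whenever one of the `_use_*_` placeholders was passed; for reference, the same lookups can be run by hand on a node (a sketch, meaningful only on GCE or AWS instances):

    # GCE: external IP of the first interface, via the metadata server.
    curl -s -H Metadata-Flavor:Google \
      http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip
    # AWS: public IPv4 if assigned, otherwise fall back to the local address.
    curl -f -s http://169.254.169.254/latest/meta-data/public-ipv4 \
      || curl -f -s http://169.254.169.254/latest/meta-data/local-ipv4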
@@ -234,7 +234,7 @@ echo "[INFO] tear-down-node on $1"
# Generate the CA certificates for k8s components
function make-ca-cert() {
echo "[INFO] make-ca-cert"
bash "${ROOT}/../saltbase/salt/generate-cert/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local"
bash "${ROOT}/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local"
}

# Provision master
@@ -25,7 +25,6 @@ KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd)
|
||||
DEFAULT_KUBECONFIG="${HOME:-.}/.kube/config"
|
||||
|
||||
source "${KUBE_ROOT}/hack/lib/util.sh"
|
||||
source "${KUBE_ROOT}/cluster/lib/logging.sh"
|
||||
# KUBE_RELEASE_VERSION_REGEX matches things like "v1.2.3" or "v1.2.3-alpha.4"
|
||||
#
|
||||
# NOTE This must match the version_regex in build/common.sh
|
||||
@ -352,8 +351,6 @@ function set_binary_version() {
|
||||
# KUBE_TAR_HASH
|
||||
# SERVER_BINARY_TAR_URL
|
||||
# SERVER_BINARY_TAR_HASH
|
||||
# SALT_TAR_URL
|
||||
# SALT_TAR_HASH
|
||||
function tars_from_version() {
|
||||
local sha1sum=""
|
||||
if which sha1sum >/dev/null 2>&1; then
|
||||
@ -367,13 +364,11 @@ function tars_from_version() {
|
||||
upload-server-tars
|
||||
elif [[ ${KUBE_VERSION} =~ ${KUBE_RELEASE_VERSION_REGEX} ]]; then
|
||||
SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
|
||||
SALT_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-salt.tar.gz"
|
||||
# TODO: Clean this up.
|
||||
KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
|
||||
KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}')
|
||||
elif [[ ${KUBE_VERSION} =~ ${KUBE_CI_VERSION_REGEX} ]]; then
|
||||
SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
|
||||
SALT_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-salt.tar.gz"
|
||||
# TODO: Clean this up.
|
||||
KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
|
||||
KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}')
|
||||
@ -384,18 +379,11 @@ function tars_from_version() {
|
||||
if ! SERVER_BINARY_TAR_HASH=$(curl -Ss --fail "${SERVER_BINARY_TAR_URL}.sha1"); then
|
||||
echo "Failure trying to curl release .sha1"
|
||||
fi
|
||||
if ! SALT_TAR_HASH=$(curl -Ss --fail "${SALT_TAR_URL}.sha1"); then
|
||||
echo "Failure trying to curl Salt tar .sha1"
|
||||
fi
|
||||
|
||||
if ! curl -Ss --head "${SERVER_BINARY_TAR_URL}" >&/dev/null; then
|
||||
echo "Can't find release at ${SERVER_BINARY_TAR_URL}" >&2
|
||||
exit 1
|
||||
fi
|
||||
if ! curl -Ss --head "${SALT_TAR_URL}" >&/dev/null; then
|
||||
echo "Can't find Salt tar at ${SALT_TAR_URL}" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Search for the specified tarball in the various known output locations,
|
||||
@ -428,16 +416,14 @@ function find-tar() {
|
||||
# KUBE_ROOT
|
||||
# Vars set:
|
||||
# SERVER_BINARY_TAR
|
||||
# SALT_TAR
|
||||
# KUBE_MANIFESTS_TAR
|
||||
function find-release-tars() {
|
||||
SERVER_BINARY_TAR=$(find-tar kubernetes-server-linux-amd64.tar.gz)
|
||||
SALT_TAR=$(find-tar kubernetes-salt.tar.gz)
|
||||
|
||||
# This tarball is used by GCI, Ubuntu Trusty, and Container Linux.
|
||||
KUBE_MANIFESTS_TAR=
|
||||
if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "container-linux" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \
|
||||
[[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "container-linux" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" ]] ; then
|
||||
if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \
|
||||
[[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" ]] ; then
|
||||
KUBE_MANIFESTS_TAR=$(find-tar kubernetes-manifests.tar.gz)
|
||||
fi
|
||||
}
|
||||
@@ -499,7 +485,7 @@ function stage-images() {
|
||||
done
|
||||
|
||||
kube::util::wait-for-jobs || {
|
||||
kube::log::error "unable to push images. See ${temp_dir}/*.log for more info."
|
||||
echo "!!! unable to push images. See ${temp_dir}/*.log for more info." 1>&2
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -574,15 +560,11 @@ function build-kube-env {
|
||||
local file=$2
|
||||
|
||||
local server_binary_tar_url=$SERVER_BINARY_TAR_URL
|
||||
local salt_tar_url=$SALT_TAR_URL
|
||||
local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}"
|
||||
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "container-linux" ]] || \
|
||||
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]] || \
|
||||
[[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
|
||||
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
|
||||
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then
|
||||
# TODO: Support fallback .tar.gz settings on Container Linux
|
||||
server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}")
|
||||
salt_tar_url=$(split_csv "${SALT_TAR_URL}")
|
||||
kube_manifests_tar_url=$(split_csv "${KUBE_MANIFESTS_TAR_URL}")
|
||||
fi
|
||||
|
||||
@@ -603,8 +585,6 @@ SERVER_BINARY_TAR_URL: $(yaml-quote ${server_binary_tar_url})
|
||||
SERVER_BINARY_TAR_HASH: $(yaml-quote ${SERVER_BINARY_TAR_HASH})
|
||||
PROJECT_ID: $(yaml-quote ${PROJECT})
|
||||
NETWORK_PROJECT_ID: $(yaml-quote ${NETWORK_PROJECT})
|
||||
SALT_TAR_URL: $(yaml-quote ${salt_tar_url})
|
||||
SALT_TAR_HASH: $(yaml-quote ${SALT_TAR_HASH})
|
||||
SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
|
||||
KUBERNETES_MASTER_NAME: $(yaml-quote ${KUBERNETES_MASTER_NAME})
|
||||
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
|
||||
@@ -696,8 +676,8 @@ EOF
|
||||
TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD})
|
||||
EOF
|
||||
fi
|
||||
if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "container-linux") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
|
||||
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "container-linux") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then
|
||||
if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
|
||||
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then
|
||||
cat >>$file <<EOF
|
||||
KUBE_MANIFESTS_TAR_URL: $(yaml-quote ${kube_manifests_tar_url})
|
||||
KUBE_MANIFESTS_TAR_HASH: $(yaml-quote ${KUBE_MANIFESTS_TAR_HASH})
|
||||
@@ -942,16 +922,6 @@ EOF
|
||||
if [ -n "${EVICTION_HARD:-}" ]; then
|
||||
cat >>$file <<EOF
|
||||
EVICTION_HARD: $(yaml-quote ${EVICTION_HARD})
|
||||
EOF
|
||||
fi
|
||||
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "container-linux" ]] || \
|
||||
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]]; then
|
||||
# Container-Linux-only env vars. TODO(yifan): Make them available on other distros.
|
||||
cat >>$file <<EOF
|
||||
KUBERNETES_CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME:-rkt})
|
||||
RKT_VERSION: $(yaml-quote ${RKT_VERSION:-})
|
||||
RKT_PATH: $(yaml-quote ${RKT_PATH:-})
|
||||
RKT_STAGE1_IMAGE: $(yaml-quote ${RKT_STAGE1_IMAGE:-})
|
||||
EOF
|
||||
fi
|
||||
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
|
||||
|
@@ -1,26 +1,17 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
|
||||
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
|
||||
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
|
||||
|
||||
pkg_tar(
|
||||
name = "gci-trusty-manifests",
|
||||
files = [
|
||||
"container-linux/configure-helper.sh",
|
||||
"gci/configure-helper.sh",
|
||||
"gci/health-monitor.sh",
|
||||
"//cluster/gce/gci/mounter",
|
||||
],
|
||||
files = {
|
||||
"//cluster/gce/gci/mounter": "gci-mounter",
|
||||
"gci/configure-helper.sh": "gci-configure-helper.sh",
|
||||
"gci/health-monitor.sh": "health-monitor.sh",
|
||||
},
|
||||
mode = "0755",
|
||||
strip_prefix = ".",
|
||||
# pkg_tar doesn't support renaming the files we add, so instead create symlinks.
|
||||
symlinks = {
|
||||
"container-linux-configure-helper.sh": "container-linux/configure-helper.sh",
|
||||
"gci-configure-helper.sh": "gci/configure-helper.sh",
|
||||
"health-monitor.sh": "gci/health-monitor.sh",
|
||||
"gci-mounter": "gci/mounter/mounter",
|
||||
"trusty-configure-helper.sh": "trusty/configure-helper.sh",
|
||||
},
|
||||
)
|
||||
|
||||
filegroup(
|
||||
@@ -40,15 +31,32 @@ filegroup(
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
||||
# Having the configure-vm.sh script and trusty code from the GCE cluster
|
||||
# deploy hosted with the release is useful for GKE.
|
||||
# This list should match the list in kubernetes/release/lib/releaselib.sh.
|
||||
# Having the COS code from the GCE cluster deploy hosted with the release is
|
||||
# useful for GKE. This list should match the list in
|
||||
# kubernetes/release/lib/releaselib.sh.
|
||||
release_filegroup(
|
||||
name = "gcs-release-artifacts",
|
||||
srcs = [
|
||||
"configure-vm.sh",
|
||||
"gci/configure.sh",
|
||||
"gci/master.yaml",
|
||||
"gci/node.yaml",
|
||||
],
|
||||
)
|
||||
|
||||
pkg_tar(
|
||||
name = "gce-master-manifests",
|
||||
srcs = [
|
||||
"manifests/abac-authz-policy.jsonl",
|
||||
"manifests/cluster-autoscaler.manifest",
|
||||
"manifests/e2e-image-puller.manifest",
|
||||
"manifests/etcd.manifest",
|
||||
"manifests/glbc.manifest",
|
||||
"manifests/kube-addon-manager.yaml",
|
||||
"manifests/kube-apiserver.manifest",
|
||||
"manifests/kube-controller-manager.manifest",
|
||||
"manifests/kube-proxy.manifest",
|
||||
"manifests/kube-scheduler.manifest",
|
||||
"manifests/rescheduler.manifest",
|
||||
],
|
||||
mode = "0644",
|
||||
)
|
||||
|
@@ -1,6 +1,6 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
|
||||
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
|
||||
|
||||
filegroup(
|
||||
name = "addon-srcs",
|
||||
@@ -16,10 +16,10 @@ filegroup(
|
||||
|
||||
pkg_tar(
|
||||
name = "addons",
|
||||
extension = "tar.gz",
|
||||
files = [
|
||||
srcs = [
|
||||
":addon-srcs",
|
||||
],
|
||||
extension = "tar.gz",
|
||||
mode = "0644",
|
||||
strip_prefix = ".",
|
||||
)
|
||||
|
@@ -54,12 +54,6 @@ CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
|
||||
|
||||
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
|
||||
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
|
||||
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
|
||||
MASTER_OS_DISTRIBUTION="container-linux"
|
||||
fi
|
||||
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
|
||||
NODE_OS_DISTRIBUTION="container-linux"
|
||||
fi
|
||||
|
||||
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
|
||||
MASTER_OS_DISTRIBUTION="gci"
|
||||
@@ -192,7 +186,7 @@ if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
|
||||
# Put the necessary label on the node so the daemonset gets scheduled.
|
||||
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
|
||||
# Add to the provider custom variables.
|
||||
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT"
|
||||
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
|
||||
fi
|
||||
|
||||
# Optional: Enable node logging.
|
||||
|
@@ -53,13 +53,6 @@ CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
|
||||
|
||||
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
|
||||
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
|
||||
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
|
||||
MASTER_OS_DISTRIBUTION="container-linux"
|
||||
fi
|
||||
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
|
||||
NODE_OS_DISTRIBUTION="container-linux"
|
||||
fi
|
||||
|
||||
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
|
||||
MASTER_OS_DISTRIBUTION="gci"
|
||||
fi
|
||||
@@ -223,7 +216,7 @@ if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
|
||||
# Put the necessary label on the node so the daemonset gets scheduled.
|
||||
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
|
||||
# Add to the provider custom variables.
|
||||
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT"
|
||||
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
|
||||
fi
|
||||
|
||||
# Optional: Enable node logging.
|
||||
|
@@ -1,932 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# If we have any arguments at all, this is a push and not just setup.
|
||||
is_push=$@
|
||||
|
||||
function ensure-basic-networking() {
|
||||
# Deal with GCE networking bring-up race. (We rely on DNS for a lot,
|
||||
# and it's just not worth doing a whole lot of startup work if this
|
||||
# isn't ready yet.)
|
||||
until getent hosts metadata.google.internal &>/dev/null; do
|
||||
echo 'Waiting for functional DNS (trying to resolve metadata.google.internal)...'
|
||||
sleep 3
|
||||
done
|
||||
until getent hosts $(hostname -f || echo _error_) &>/dev/null; do
|
||||
echo 'Waiting for functional DNS (trying to resolve my own FQDN)...'
|
||||
sleep 3
|
||||
done
|
||||
until getent hosts $(hostname -i || echo _error_) &>/dev/null; do
|
||||
echo 'Waiting for functional DNS (trying to resolve my own IP)...'
|
||||
sleep 3
|
||||
done
|
||||
|
||||
echo "Networking functional on $(hostname) ($(hostname -i))"
|
||||
}
|
||||
|
||||
# A hookpoint for installing any needed packages
|
||||
ensure-packages() {
|
||||
:
|
||||
}
|
||||
|
||||
function create-node-pki {
|
||||
echo "Creating node pki files"
|
||||
|
||||
local -r pki_dir="/etc/kubernetes/pki"
|
||||
mkdir -p "${pki_dir}"
|
||||
|
||||
if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then
|
||||
CA_CERT_BUNDLE="${CA_CERT}"
|
||||
fi
|
||||
|
||||
CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
|
||||
echo "${CA_CERT_BUNDLE}" | base64 --decode > "${CA_CERT_BUNDLE_PATH}"
|
||||
|
||||
if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
|
||||
KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
|
||||
echo "${KUBELET_CERT}" | base64 --decode > "${KUBELET_CERT_PATH}"
|
||||
|
||||
KUBELET_KEY_PATH="${pki_dir}/kubelet.key"
|
||||
echo "${KUBELET_KEY}" | base64 --decode > "${KUBELET_KEY_PATH}"
|
||||
fi
|
||||
}
|
||||
|
||||
# A hookpoint for setting up local devices
|
||||
ensure-local-disks() {
|
||||
for ssd in /dev/disk/by-id/google-local-ssd-*; do
|
||||
if [ -e "$ssd" ]; then
|
||||
ssdnum=`echo $ssd | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/'`
|
||||
echo "Formatting and mounting local SSD $ssd to /mnt/disks/ssd$ssdnum"
|
||||
mkdir -p /mnt/disks/ssd$ssdnum
|
||||
/usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${ssd}" /mnt/disks/ssd$ssdnum &>/var/log/local-ssd-$ssdnum-mount.log || \
|
||||
{ echo "Local SSD $ssdnum mount failed, review /var/log/local-ssd-$ssdnum-mount.log"; return 1; }
|
||||
else
|
||||
echo "No local SSD disks found."
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
function config-ip-firewall {
|
||||
echo "Configuring IP firewall rules"
|
||||
|
||||
# Do not consider loopback addresses as martian source or destination while
|
||||
# routing. This enables the use of 127/8 for local routing purposes.
|
||||
sysctl -w net.ipv4.conf.all.route_localnet=1
|
||||
|
||||
# We need to add rules to accept all TCP/UDP/ICMP packets.
|
||||
if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
|
||||
echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
|
||||
iptables -A INPUT -p TCP -j ACCEPT
|
||||
iptables -A INPUT -p UDP -j ACCEPT
|
||||
iptables -A INPUT -p ICMP -j ACCEPT
|
||||
fi
|
||||
if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
|
||||
echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
|
||||
iptables -A FORWARD -p TCP -j ACCEPT
|
||||
iptables -A FORWARD -p UDP -j ACCEPT
|
||||
iptables -A FORWARD -p ICMP -j ACCEPT
|
||||
fi
|
||||
|
||||
# Flush iptables nat table
|
||||
iptables -t nat -F || true
|
||||
|
||||
if [[ "${NON_MASQUERADE_CIDR:-}" == "0.0.0.0/0" ]]; then
|
||||
echo "Add rules for ip masquerade"
|
||||
iptables -t nat -N IP-MASQ
|
||||
iptables -t nat -A POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! --dst-type LOCAL -j IP-MASQ
|
||||
iptables -t nat -A IP-MASQ -d 169.254.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
|
||||
iptables -t nat -A IP-MASQ -d 10.0.0.0/8 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
|
||||
iptables -t nat -A IP-MASQ -d 172.16.0.0/12 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
|
||||
iptables -t nat -A IP-MASQ -d 192.168.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
|
||||
iptables -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE
|
||||
fi
|
||||
|
||||
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
|
||||
echo "Add rule for metadata concealment"
|
||||
iptables -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988
|
||||
fi
|
||||
}
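# (Added verification sketch, not part of the original script.) After
# config-ip-firewall has run with NON_MASQUERADE_CIDR=0.0.0.0/0, the custom
# chain and the optional metadata-concealment DNAT rule can be inspected with:
iptables -t nat -L IP-MASQ -n -v
iptables -t nat -L PREROUTING -n | grep 169.254.169.254 || true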
|
||||
|
||||
function ensure-install-dir() {
|
||||
INSTALL_DIR="/var/cache/kubernetes-install"
|
||||
mkdir -p ${INSTALL_DIR}
|
||||
cd ${INSTALL_DIR}
|
||||
}
|
||||
|
||||
function salt-apiserver-timeout-grain() {
|
||||
cat <<EOF >>/etc/salt/minion.d/grains.conf
|
||||
minRequestTimeout: '$1'
|
||||
EOF
|
||||
}
|
||||
|
||||
function set-broken-motd() {
|
||||
echo -e '\nBroken (or in progress) Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd
|
||||
}
|
||||
|
||||
function reset-motd() {
|
||||
# kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
|
||||
local -r version="$(/usr/local/bin/kubelet --version=true | cut -f2 -d " ")"
|
||||
# This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
|
||||
# or the git hash that's in the build info.
|
||||
local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
|
||||
local devel=""
|
||||
if [[ "${gitref}" != "${version}" ]]; then
|
||||
devel="
|
||||
Note: This looks like a development version, which might not be present on GitHub.
|
||||
If it isn't, the closest tag is at:
|
||||
https://github.com/kubernetes/kubernetes/tree/${gitref}
|
||||
"
|
||||
gitref="${version//*+/}"
|
||||
fi
|
||||
cat > /etc/motd <<EOF
|
||||
|
||||
Welcome to Kubernetes ${version}!
|
||||
|
||||
You can find documentation for Kubernetes at:
|
||||
http://docs.kubernetes.io/
|
||||
|
||||
The source for this release can be found at:
|
||||
/usr/local/share/doc/kubernetes/kubernetes-src.tar.gz
|
||||
Or you can download it at:
|
||||
https://storage.googleapis.com/kubernetes-release/release/${version}/kubernetes-src.tar.gz
|
||||
|
||||
It is based on the Kubernetes source at:
|
||||
https://github.com/kubernetes/kubernetes/tree/${gitref}
|
||||
${devel}
|
||||
For Kubernetes copyright and licensing information, see:
|
||||
/usr/local/share/doc/kubernetes/LICENSES
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
function curl-metadata() {
|
||||
curl --fail --retry 5 --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/attributes/${1}"
|
||||
}
|
||||
|
||||
function set-kube-env() {
|
||||
(umask 700;
|
||||
local kube_env_yaml="${INSTALL_DIR}/kube_env.yaml"
|
||||
|
||||
until curl-metadata kube-env > "${kube_env_yaml}"; do
|
||||
echo 'Waiting for kube-env...'
|
||||
sleep 3
|
||||
done
|
||||
|
||||
# kube-env has all the environment variables we care about, in a flat yaml format
|
||||
eval "$(python -c '
|
||||
import pipes,sys,yaml
|
||||
|
||||
for k,v in yaml.load(sys.stdin).iteritems():
|
||||
print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
|
||||
print("""export {var}""".format(var = k))
|
||||
' < """${kube_env_yaml}""")"
|
||||
)
|
||||
}
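# (Added illustration, not part of the original script.) kube-env is flat YAML;
# the embedded Python above turns each key into a readonly, exported shell
# variable. For a hypothetical two-line kube-env such as:
#   CLUSTER_NAME: my-cluster
#   ENABLE_NODE_LOGGING: 'true'
# the eval'd output is roughly:
#   readonly CLUSTER_NAME=my-cluster
#   export CLUSTER_NAME
#   readonly ENABLE_NODE_LOGGING=true
#   export ENABLE_NODE_LOGGING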
|
||||
|
||||
function remove-docker-artifacts() {
|
||||
echo "== Deleting docker0 =="
|
||||
apt-get-install bridge-utils
|
||||
|
||||
# Remove docker artifacts on minion nodes, if present
|
||||
ifconfig docker0 down || true
|
||||
brctl delbr docker0 || true
|
||||
echo "== Finished deleting docker0 =="
|
||||
}
|
||||
|
||||
# Retry a download until we get it. Takes a hash and a set of URLs.
|
||||
#
|
||||
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
|
||||
# $2+ are the URLs to download.
|
||||
download-or-bust() {
|
||||
local -r hash="$1"
|
||||
shift 1
|
||||
|
||||
urls=( $* )
|
||||
while true; do
|
||||
for url in "${urls[@]}"; do
|
||||
local file="${url##*/}"
|
||||
rm -f "${file}"
|
||||
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then
|
||||
echo "== Failed to download ${url}. Retrying. =="
|
||||
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
|
||||
echo "== Hash validation of ${url} failed. Retrying. =="
|
||||
else
|
||||
if [[ -n "${hash}" ]]; then
|
||||
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
|
||||
else
|
||||
echo "== Downloaded ${url} =="
|
||||
fi
|
||||
return
|
||||
fi
|
||||
done
|
||||
done
|
||||
}
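# (Added usage sketch, not part of the original script.) The first argument is
# the expected sha1, or "" to skip verification; the remaining arguments are
# mirror URLs tried in order until one downloads and validates. The URLs and
# the hash below are hypothetical.
download-or-bust "" "https://example.com/mirror-a/foo.tar.gz" "https://example.com/mirror-b/foo.tar.gz"
download-or-bust "da39a3ee5e6b4b0d3255bfef95601890afd80709" "https://example.com/foo.tar.gz"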
|
||||
|
||||
validate-hash() {
|
||||
local -r file="$1"
|
||||
local -r expected="$2"
|
||||
local actual
|
||||
|
||||
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
|
||||
if [[ "${actual}" != "${expected}" ]]; then
|
||||
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
apt-get-install() {
|
||||
local -r packages=( $@ )
|
||||
installed=true
|
||||
for package in "${packages[@]}"; do
|
||||
if ! dpkg -s "${package}" &>/dev/null; then
|
||||
installed=false
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [[ "${installed}" == "true" ]]; then
|
||||
echo "== ${packages[@]} already installed, skipped apt-get install ${packages[@]} =="
|
||||
return
|
||||
fi
|
||||
|
||||
apt-get-update
|
||||
|
||||
# Forcibly install packages (options borrowed from Salt logs).
|
||||
until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install $@; do
|
||||
echo "== install of packages $@ failed, retrying =="
|
||||
sleep 5
|
||||
done
|
||||
}
|
||||
|
||||
apt-get-update() {
|
||||
echo "== Refreshing package database =="
|
||||
until apt-get update; do
|
||||
echo "== apt-get update failed, retrying =="
|
||||
sleep 5
|
||||
done
|
||||
}
|
||||
|
||||
# Restart any services that need restarting due to a library upgrade
|
||||
# Uses needrestart
|
||||
restart-updated-services() {
|
||||
# We default to restarting services, because this is only done as part of an update
|
||||
if [[ "${AUTO_RESTART_SERVICES:-true}" != "true" ]]; then
|
||||
echo "Auto restart of services prevented by AUTO_RESTART_SERVICES=${AUTO_RESTART_SERVICES}"
|
||||
return
|
||||
fi
|
||||
echo "Restarting services with updated libraries (needrestart -r a)"
|
||||
# The pipes make sure that needrestart doesn't think it is running with a TTY
|
||||
# Debian bug #803249; fixed but not necessarily in package repos yet
|
||||
echo "" | needrestart -r a 2>&1 | tee /dev/null
|
||||
}
|
||||
|
||||
# Reboot the machine if /var/run/reboot-required exists
|
||||
reboot-if-required() {
|
||||
if [[ ! -e "/var/run/reboot-required" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
echo "Reboot is required (/var/run/reboot-required detected)"
|
||||
if [[ -e "/var/run/reboot-required.pkgs" ]]; then
|
||||
echo "Packages that triggered reboot:"
|
||||
cat /var/run/reboot-required.pkgs
|
||||
fi
|
||||
|
||||
# We default to rebooting the machine because this is only done as part of an update
|
||||
if [[ "${AUTO_REBOOT:-true}" != "true" ]]; then
|
||||
echo "Reboot prevented by AUTO_REBOOT=${AUTO_REBOOT}"
|
||||
return
|
||||
fi
|
||||
|
||||
rm -f /var/run/reboot-required
|
||||
rm -f /var/run/reboot-required.pkgs
|
||||
echo "Triggering reboot"
|
||||
init 6
|
||||
}
|
||||
|
||||
# Install upgrades using unattended-upgrades, then reboot or restart services
|
||||
auto-upgrade() {
|
||||
# We default to not installing upgrades
|
||||
if [[ "${AUTO_UPGRADE:-false}" != "true" ]]; then
|
||||
echo "AUTO_UPGRADE not set to true; won't auto-upgrade"
|
||||
return
|
||||
fi
|
||||
apt-get-install unattended-upgrades needrestart
|
||||
unattended-upgrade --debug
|
||||
reboot-if-required # We may reboot the machine right here
|
||||
restart-updated-services
|
||||
}
|
||||
|
||||
#
|
||||
# Install salt from GCS. See README.md for instructions on how to update these
|
||||
# debs.
|
||||
install-salt() {
|
||||
if dpkg -s salt-minion &>/dev/null; then
|
||||
echo "== SaltStack already installed, skipping install step =="
|
||||
return
|
||||
fi
|
||||
|
||||
echo "== Refreshing package database =="
|
||||
until apt-get update; do
|
||||
echo "== apt-get update failed, retrying =="
|
||||
sleep 5
|
||||
done
|
||||
|
||||
mkdir -p /var/cache/salt-install
|
||||
cd /var/cache/salt-install
|
||||
|
||||
DEBS=(
|
||||
libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
|
||||
python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
|
||||
salt-common_2014.1.13+ds-1~bpo70+1_all.deb
|
||||
salt-minion_2014.1.13+ds-1~bpo70+1_all.deb
|
||||
)
|
||||
URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"
|
||||
|
||||
for deb in "${DEBS[@]}"; do
|
||||
if [ ! -e "${deb}" ]; then
|
||||
download-or-bust "" "${URL_BASE}/${deb}"
|
||||
fi
|
||||
done
|
||||
|
||||
# Based on
|
||||
# https://major.io/2014/06/26/install-debian-packages-without-starting-daemons/
|
||||
# We do this to prevent Salt from starting the salt-minion
|
||||
# daemon. The other packages don't have relevant daemons. (If you
|
||||
# add a package that needs a daemon started, add it to a different
|
||||
# list.)
|
||||
cat > /usr/sbin/policy-rc.d <<EOF
|
||||
#!/bin/sh
|
||||
echo "Salt shall not start." >&2
|
||||
exit 101
|
||||
EOF
|
||||
chmod 0755 /usr/sbin/policy-rc.d
|
||||
|
||||
for deb in "${DEBS[@]}"; do
|
||||
echo "== Installing ${deb}, ignore dependency complaints (will fix later) =="
|
||||
dpkg --skip-same-version --force-depends -i "${deb}"
|
||||
done
|
||||
|
||||
# This will install any of the unmet dependencies from above.
|
||||
echo "== Installing unmet dependencies =="
|
||||
until apt-get install -f -y; do
|
||||
echo "== apt-get install failed, retrying =="
|
||||
sleep 5
|
||||
done
|
||||
|
||||
rm /usr/sbin/policy-rc.d
|
||||
|
||||
# Log a timestamp
|
||||
echo "== Finished installing Salt =="
|
||||
}
|
||||
|
||||
# Ensure salt-minion isn't running and never runs
|
||||
stop-salt-minion() {
|
||||
if [[ -e /etc/init/salt-minion.override ]]; then
|
||||
# Assume this has already run (upgrade, or baked into containervm)
|
||||
return
|
||||
fi
|
||||
|
||||
# This ensures it on next reboot
|
||||
echo manual > /etc/init/salt-minion.override
|
||||
update-rc.d salt-minion disable
|
||||
|
||||
while service salt-minion status >/dev/null; do
|
||||
echo "salt-minion found running, stopping"
|
||||
service salt-minion stop
|
||||
sleep 1
|
||||
done
|
||||
}
|
||||
|
||||
# Finds the master PD device; returns it in MASTER_PD_DEVICE
|
||||
find-master-pd() {
|
||||
MASTER_PD_DEVICE=""
|
||||
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
|
||||
return
|
||||
fi
|
||||
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
|
||||
relative_path=${device_info##* }
|
||||
MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
|
||||
}
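# (Added sketch, not part of the original script.) "ls -l" on the by-id symlink
# ends with the link target, so "${device_info##* }" keeps only that last
# token; the device below is hypothetical.
device_info='lrwxrwxrwx 1 root root 9 Jan  1 00:00 /dev/disk/by-id/google-master-pd -> ../../sdb'
relative_path=${device_info##* }
echo "/dev/disk/by-id/${relative_path}"   # -> /dev/disk/by-id/../../sdb (i.e. /dev/sdb)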
|
||||
|
||||
# Create the overlay files for the salt tree. We create these in a separate
|
||||
# place so that we can blow away the rest of the salt configs on a kube-push and
|
||||
# re-apply these.
|
||||
function create-salt-pillar() {
|
||||
# Always overwrite the cluster-params.sls (even on a push, we have
|
||||
# these variables)
|
||||
mkdir -p /srv/salt-overlay/pillar
|
||||
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
|
||||
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
|
||||
node_tags: '$(echo "$NODE_TAGS" | sed -e "s/'/''/g")'
|
||||
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
|
||||
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
|
||||
allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
|
||||
non_masquerade_cidr: '$(echo "$NON_MASQUERADE_CIDR" | sed -e "s/'/''/g")'
|
||||
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
|
||||
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
|
||||
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
|
||||
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
|
||||
enable_node_problem_detector: '$(echo "$ENABLE_NODE_PROBLEM_DETECTOR" | sed -e "s/'/''/g")'
|
||||
enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")'
|
||||
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
|
||||
enable_metadata_proxy: '$(echo "$ENABLE_METADATA_CONCEALMENT" | sed -e "s/'/''/g")'
|
||||
enable_metrics_server: '$(echo "$ENABLE_METRICS_SERVER" | sed -e "s/'/''/g")'
|
||||
enable_pod_security_policy: '$(echo "$ENABLE_POD_SECURITY_POLICY" | sed -e "s/'/''/g")'
|
||||
enable_rescheduler: '$(echo "$ENABLE_RESCHEDULER" | sed -e "s/'/''/g")'
|
||||
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
|
||||
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
|
||||
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
|
||||
cluster_dns_core_dns: '$(echo "$CLUSTER_DNS_CORE_DNS" | sed -e "s/'/''/g")'
|
||||
enable_cluster_registry: '$(echo "$ENABLE_CLUSTER_REGISTRY" | sed -e "s/'/''/g")'
|
||||
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
|
||||
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
|
||||
enable_dns_horizontal_autoscaler: '$(echo "$ENABLE_DNS_HORIZONTAL_AUTOSCALER" | sed -e "s/'/''/g")'
|
||||
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
|
||||
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
|
||||
prepull_e2e_images: '$(echo "$PREPULL_E2E_IMAGES" | sed -e "s/'/''/g")'
|
||||
hairpin_mode: '$(echo "$HAIRPIN_MODE" | sed -e "s/'/''/g")'
|
||||
softlockup_panic: '$(echo "$SOFTLOCKUP_PANIC" | sed -e "s/'/''/g")'
|
||||
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
|
||||
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")'
|
||||
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
|
||||
network_policy_provider: '$(echo "$NETWORK_POLICY_PROVIDER" | sed -e "s/'/''/g")'
|
||||
enable_manifest_url: '$(echo "${ENABLE_MANIFEST_URL:-}" | sed -e "s/'/''/g")'
|
||||
manifest_url: '$(echo "${MANIFEST_URL:-}" | sed -e "s/'/''/g")'
|
||||
manifest_url_header: '$(echo "${MANIFEST_URL_HEADER:-}" | sed -e "s/'/''/g")'
|
||||
num_nodes: $(echo "${NUM_NODES:-}" | sed -e "s/'/''/g")
|
||||
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
|
||||
kube_uid: '$(echo "${KUBE_UID}" | sed -e "s/'/''/g")'
|
||||
initial_etcd_cluster: '$(echo "${INITIAL_ETCD_CLUSTER:-}" | sed -e "s/'/''/g")'
|
||||
initial_etcd_cluster_state: '$(echo "${INITIAL_ETCD_CLUSTER_STATE:-}" | sed -e "s/'/''/g")'
|
||||
ca_cert_bundle_path: '$(echo "${CA_CERT_BUNDLE_PATH:-}" | sed -e "s/'/''/g")'
|
||||
hostname: '$(echo "${ETCD_HOSTNAME:-$(hostname -s)}" | sed -e "s/'/''/g")'
|
||||
enable_pod_priority: '$(echo "${ENABLE_POD_PRIORITY:-}" | sed -e "s/'/''/g")'
|
||||
enable_default_storage_class: '$(echo "$ENABLE_DEFAULT_STORAGE_CLASS" | sed -e "s/'/''/g")'
|
||||
kube_proxy_daemonset: '$(echo "$KUBE_PROXY_DAEMONSET" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
if [ -n "${STORAGE_BACKEND:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
storage_backend: '$(echo "$STORAGE_BACKEND" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${STORAGE_MEDIA_TYPE:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
storage_media_type: '$(echo "$STORAGE_MEDIA_TYPE" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
kube_apiserver_request_timeout_sec: '$(echo "$KUBE_APISERVER_REQUEST_TIMEOUT_SEC" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
etcd_liveness_probe_initial_delay: '$(echo "$ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
kube_apiserver_liveness_probe_initial_delay: '$(echo "$KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${ADMISSION_CONTROL:-}" ] && [ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
admission-control-config-file: /etc/admission_controller.config
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${KUBELET_PORT:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
kubelet_port: '$(echo "$KUBELET_PORT" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${ETCD_IMAGE:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
etcd_docker_tag: '$(echo "$ETCD_IMAGE" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${ETCD_DOCKER_REPOSITORY:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
etcd_docker_repository: '$(echo "$ETCD_DOCKER_REPOSITORY" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${ETCD_VERSION:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
etcd_version: '$(echo "$ETCD_VERSION" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
etcd_over_ssl: 'true'
|
||||
EOF
|
||||
else
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
etcd_over_ssl: 'false'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${ETCD_QUORUM_READ:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
etcd_quorum_read: '$(echo "${ETCD_QUORUM_READ}" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
# Configuration changes for test clusters
|
||||
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
apiserver_test_args: '$(echo "$APISERVER_TEST_ARGS" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
api_server_test_log_level: '$(echo "$API_SERVER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${KUBELET_TEST_ARGS:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
kubelet_test_args: '$(echo "$KUBELET_TEST_ARGS" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
kubelet_test_log_level: '$(echo "$KUBELET_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
docker_test_log_level: '$(echo "$DOCKER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
controller_manager_test_args: '$(echo "$CONTROLLER_MANAGER_TEST_ARGS" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
controller_manager_test_log_level: '$(echo "$CONTROLLER_MANAGER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
scheduler_test_args: '$(echo "$SCHEDULER_TEST_ARGS" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
scheduler_test_log_level: '$(echo "$SCHEDULER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
kubeproxy_test_args: '$(echo "$KUBEPROXY_TEST_ARGS" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
kubeproxy_test_log_level: '$(echo "$KUBEPROXY_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
# TODO: Replace this with a persistent volume (and create it).
|
||||
if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
cluster_registry_disk_type: gce
|
||||
cluster_registry_disk_size: $(echo $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE}) | sed -e "s/'/''/g")
|
||||
cluster_registry_disk_name: $(echo ${CLUSTER_REGISTRY_DISK} | sed -e "s/'/''/g")
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
terminated_pod_gc_threshold: '$(echo "${TERMINATED_POD_GC_THRESHOLD}" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${ENABLE_CUSTOM_METRICS:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
enable_custom_metrics: '$(echo "${ENABLE_CUSTOM_METRICS}" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${NODE_LABELS:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
node_labels: '$(echo "${NODE_LABELS}" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${NON_MASTER_NODE_LABELS:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
non_master_node_labels: '$(echo "${NON_MASTER_NODE_LABELS}" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${NODE_TAINTS:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
node_taints: '$(echo "${NODE_TAINTS}" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${EVICTION_HARD:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-false}" == "true" ]]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
enable_cluster_autoscaler: '$(echo "${ENABLE_CLUSTER_AUTOSCALER}" | sed -e "s/'/''/g")'
|
||||
autoscaler_mig_config: '$(echo "${AUTOSCALER_MIG_CONFIG}" | sed -e "s/'/''/g")'
|
||||
autoscaler_expander_config: '$(echo "${AUTOSCALER_EXPANDER_CONFIG}" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
scheduling_algorithm_provider: '$(echo "${SCHEDULING_ALGORITHM_PROVIDER}" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
if [ -n "${ENABLE_IP_ALIASES:-}" ]; then
|
||||
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
|
||||
enable_ip_aliases: '$(echo "$ENABLE_IP_ALIASES" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
# The job of this function is simple, but the basic regular expression syntax makes
|
||||
# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc
|
||||
# into [0-9]+, Ki, Mi, Gi, etc.
|
||||
# This is done in two steps:
|
||||
# 1. Convert from [0-9]+X?i?B into [0-9]X? (X denotes the prefix, ? means the field
|
||||
# is optional.)
|
||||
# 2. Attach an 'i' to the end of the string if we find a letter.
|
||||
# The two step process is needed to handle the edge case in which we want to convert
|
||||
# a raw byte count, as the result should be a simple number (e.g. 5B -> 5).
|
||||
function convert-bytes-gce-kube() {
|
||||
local -r storage_space=$1
|
||||
echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/'
|
||||
}
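# (Added examples, not part of the original script.) The two sed steps above
# behave as follows:
convert-bytes-gce-kube 5B      # -> 5     (raw byte count keeps no suffix)
convert-bytes-gce-kube 200GB   # -> 200Gi
convert-bytes-gce-kube 64KiB   # -> 64Ki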
|
||||
|
||||
# This should happen both on cluster initialization and node upgrades.
|
||||
#
|
||||
# - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
|
||||
# KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
|
||||
# connect to the apiserver.
|
||||
|
||||
function create-salt-kubelet-auth() {
|
||||
local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig"
|
||||
if [ ! -e "${kubelet_kubeconfig_file}" ]; then
|
||||
mkdir -p /srv/salt-overlay/salt/kubelet
|
||||
(umask 077;
|
||||
cat > "${kubelet_kubeconfig_file}" <<EOF
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
- name: kubelet
|
||||
user:
|
||||
client-certificate: ${KUBELET_CERT_PATH}
|
||||
client-key: ${KUBELET_KEY_PATH}
|
||||
clusters:
|
||||
- name: local
|
||||
cluster:
|
||||
server: https://${KUBERNETES_MASTER_NAME}
|
||||
certificate-authority: ${CA_CERT_BUNDLE_PATH}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: kubelet
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
||||
EOF
|
||||
)
|
||||
fi
|
||||
}
|
||||
|
||||
# This should happen both on cluster initialization and node upgrades.
|
||||
#
|
||||
# - When run as static pods, use the CA_CERT and KUBE_PROXY_TOKEN to generate a
|
||||
# kubeconfig file for the kube-proxy to securely connect to the apiserver.
|
||||
function create-salt-kubeproxy-auth() {
|
||||
local -r kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
|
||||
if [ ! -e "${kube_proxy_kubeconfig_file}" ]; then
|
||||
mkdir -p /srv/salt-overlay/salt/kube-proxy
|
||||
(umask 077;
|
||||
cat > "${kube_proxy_kubeconfig_file}" <<EOF
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
- name: kube-proxy
|
||||
user:
|
||||
token: ${KUBE_PROXY_TOKEN}
|
||||
clusters:
|
||||
- name: local
|
||||
cluster:
|
||||
certificate-authority-data: ${CA_CERT_BUNDLE}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: kube-proxy
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
||||
EOF
|
||||
)
|
||||
fi
|
||||
}
|
||||
|
||||
function split-commas() {
|
||||
echo $1 | tr "," "\n"
|
||||
}
|
||||
|
||||
function try-download-release() {
|
||||
# TODO(zmerlynn): Now we REALLy have no excuse not to do the reboot
|
||||
# optimization.
|
||||
|
||||
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
|
||||
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
|
||||
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
|
||||
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
|
||||
else
|
||||
echo "Downloading binary release sha1 (not found in env)"
|
||||
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
|
||||
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
|
||||
fi
|
||||
|
||||
echo "Downloading binary release tar (${server_binary_tar_urls[@]})"
|
||||
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
|
||||
|
||||
local -r salt_tar_urls=( $(split-commas "${SALT_TAR_URL}") )
|
||||
local -r salt_tar="${salt_tar_urls[0]##*/}"
|
||||
if [[ -n "${SALT_TAR_HASH:-}" ]]; then
|
||||
local -r salt_tar_hash="${SALT_TAR_HASH}"
|
||||
else
|
||||
echo "Downloading Salt tar sha1 (not found in env)"
|
||||
download-or-bust "" "${salt_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
|
||||
local -r salt_tar_hash=$(cat "${salt_tar}.sha1")
|
||||
fi
|
||||
|
||||
echo "Downloading Salt tar (${salt_tar_urls[@]})"
|
||||
download-or-bust "${salt_tar_hash}" "${salt_tar_urls[@]}"
|
||||
|
||||
echo "Unpacking Salt tree and checking integrity of binary release tar"
|
||||
rm -rf kubernetes
|
||||
tar xzf "${salt_tar}" && tar tzf "${server_binary_tar}" > /dev/null
|
||||
}
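# (Added sketch, not part of the original script.) The expansions used above,
# with a hypothetical comma-separated mirror list and the split-commas helper
# defined earlier in this file:
SERVER_BINARY_TAR_URL="https://example.com/a/kubernetes-server-linux-amd64.tar.gz,https://example.com/b/kubernetes-server-linux-amd64.tar.gz"
urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
echo "${urls[0]##*/}"                     # basename of the first URL: kubernetes-server-linux-amd64.tar.gz
echo "${urls[@]/.tar.gz/.tar.gz.sha1}"    # every URL rewritten to point at its .sha1 companion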
|
||||
|
||||
function download-release() {
|
||||
# In case of failure checking integrity of release, retry.
|
||||
until try-download-release; do
|
||||
sleep 15
|
||||
echo "Couldn't download release. Retrying..."
|
||||
done
|
||||
|
||||
echo "Running release install script"
|
||||
kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"
|
||||
}
|
||||
|
||||
function fix-apt-sources() {
|
||||
sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list
|
||||
sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list
|
||||
}
|
||||
|
||||
function salt-run-local() {
|
||||
cat <<EOF >/etc/salt/minion.d/local.conf
|
||||
file_client: local
|
||||
file_roots:
|
||||
base:
|
||||
- /srv/salt
|
||||
EOF
|
||||
}
|
||||
|
||||
function salt-debug-log() {
|
||||
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
|
||||
log_level: debug
|
||||
log_level_logfile: debug
|
||||
EOF
|
||||
}
|
||||
|
||||
function salt-node-role() {
|
||||
local -r kubelet_bootstrap_kubeconfig="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig"
|
||||
local -r kubelet_kubeconfig="/srv/salt-overlay/salt/kubelet/kubeconfig"
|
||||
cat <<EOF >/etc/salt/minion.d/grains.conf
|
||||
grains:
|
||||
roles:
|
||||
- kubernetes-pool
|
||||
cloud: gce
|
||||
api_servers: '${KUBERNETES_MASTER_NAME}'
|
||||
kubelet_bootstrap_kubeconfig: /var/lib/kubelet/bootstrap-kubeconfig
|
||||
kubelet_kubeconfig: /var/lib/kubelet/kubeconfig
|
||||
EOF
|
||||
}
|
||||
|
||||
function env-to-grains {
|
||||
local key=$1
|
||||
local env_key=`echo $key | tr '[:lower:]' '[:upper:]'`
|
||||
local value=${!env_key:-}
|
||||
if [[ -n "${value}" ]]; then
|
||||
# Note this is yaml, so indentation matters
|
||||
cat <<EOF >>/etc/salt/minion.d/grains.conf
|
||||
${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
|
||||
EOF
|
||||
fi
|
||||
}
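# (Added usage sketch, not part of the original script.) The grain key is
# upper-cased to find the matching environment variable, and the grain is
# written under the original lower-case key; the value below is hypothetical.
FEATURE_GATES="ExperimentalCriticalPodAnnotation=true"
env-to-grains "feature_gates"
# appends to /etc/salt/minion.d/grains.conf (indented under "grains:"):
#   feature_gates: 'ExperimentalCriticalPodAnnotation=true'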
|
||||
|
||||
function node-docker-opts() {
|
||||
if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then
|
||||
DOCKER_OPTS="${DOCKER_OPTS:-} ${EXTRA_DOCKER_OPTS}"
|
||||
fi
|
||||
|
||||
# Decide whether to enable a docker registry mirror. This is taken from
|
||||
# the "kube-env" metadata value.
|
||||
if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
|
||||
echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
|
||||
DOCKER_OPTS="${DOCKER_OPTS:-} --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}"
|
||||
fi
|
||||
}
|
||||
|
||||
function salt-grains() {
|
||||
env-to-grains "docker_opts"
|
||||
env-to-grains "docker_root"
|
||||
env-to-grains "kubelet_root"
|
||||
env-to-grains "feature_gates"
|
||||
}
|
||||
|
||||
function configure-salt() {
|
||||
mkdir -p /etc/salt/minion.d
|
||||
salt-run-local
|
||||
salt-node-role
|
||||
node-docker-opts
|
||||
salt-grains
|
||||
install-salt
|
||||
stop-salt-minion
|
||||
}
|
||||
|
||||
function run-salt() {
|
||||
echo "== Calling Salt =="
|
||||
local rc=0
|
||||
for i in {0..6}; do
|
||||
salt-call --retcode-passthrough --local state.highstate && rc=0 || rc=$?
|
||||
if [[ "${rc}" == 0 ]]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
echo "Salt failed to run repeatedly" >&2
|
||||
return "${rc}"
|
||||
}
|
||||
|
||||
function run-user-script() {
|
||||
if curl-metadata k8s-user-startup-script > "${INSTALL_DIR}/k8s-user-script.sh"; then
|
||||
user_script=$(cat "${INSTALL_DIR}/k8s-user-script.sh")
|
||||
fi
|
||||
if [[ ! -z ${user_script:-} ]]; then
|
||||
chmod u+x "${INSTALL_DIR}/k8s-user-script.sh"
|
||||
echo "== running user startup script =="
|
||||
"${INSTALL_DIR}/k8s-user-script.sh"
|
||||
fi
|
||||
}
|
||||
|
||||
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
|
||||
echo "Support for debian master has been removed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${is_push}" ]]; then
|
||||
echo "== kube-up node config starting =="
|
||||
set-broken-motd
|
||||
ensure-basic-networking
|
||||
fix-apt-sources
|
||||
ensure-install-dir
|
||||
ensure-packages
|
||||
set-kube-env
|
||||
auto-upgrade
|
||||
ensure-local-disks
|
||||
create-node-pki
|
||||
create-salt-pillar
|
||||
create-salt-kubelet-auth
|
||||
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
|
||||
create-salt-kubeproxy-auth
|
||||
fi
|
||||
download-release
|
||||
configure-salt
|
||||
remove-docker-artifacts
|
||||
config-ip-firewall
|
||||
run-salt
|
||||
reset-motd
|
||||
|
||||
run-user-script
|
||||
echo "== kube-up node config done =="
|
||||
else
|
||||
echo "== kube-push node config starting =="
|
||||
ensure-basic-networking
|
||||
ensure-install-dir
|
||||
set-kube-env
|
||||
create-salt-pillar
|
||||
download-release
|
||||
reset-motd
|
||||
run-salt
|
||||
echo "== kube-push node config done =="
|
||||
fi
|
@@ -1,8 +0,0 @@
approvers:
- euank
- yifan-gu
- ethernetdan
reviewers:
- euank
- yifan-gu
- ethernetdan
@@ -1,8 +0,0 @@
# Container Linux image

The [Container Linux Operating System](https://coreos.com/why/) is a Linux distribution optimized for running containers securely at scale.
CoreOS provides [a Container Linux image](https://coreos.com/os/docs/latest/booting-on-google-compute-engine.html) for Google Cloud Platform (GCP).

This folder contains configuration and tooling to allow kube-up to create a Kubernetes cluster on Google Cloud Platform running on the official Container Linux image.

[]()
File diff suppressed because it is too large
@@ -1,182 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# Use --retry-connrefused opt only if it's supported by curl.
|
||||
CURL_RETRY_CONNREFUSED=""
|
||||
if curl --help | grep -q -- '--retry-connrefused'; then
|
||||
CURL_RETRY_CONNREFUSED='--retry-connrefused'
|
||||
fi
|
||||
|
||||
function download-kube-env {
|
||||
# Fetch kube-env from GCE metadata server.
|
||||
local -r tmp_kube_env="/tmp/kube-env.yaml"
|
||||
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
|
||||
-H "X-Google-Metadata-Request: True" \
|
||||
-o "${tmp_kube_env}" \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
|
||||
# Convert the yaml format file into a shell-style file.
|
||||
sed 's/: /=/' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env"
|
||||
rm -f "${tmp_kube_env}"
|
||||
}
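# (Added illustration, not part of the original script.) The sed expression
# above rewrites only the first "key: value" separator on each line, turning
# flat kube-env YAML into shell assignments; the input line is hypothetical.
echo "CLUSTER_NAME: my-cluster" | sed 's/: /=/'   # -> CLUSTER_NAME=my-cluster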
|
||||
|
||||
function validate-hash {
|
||||
local -r file="$1"
|
||||
local -r expected="$2"
|
||||
|
||||
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
|
||||
if [[ "${actual}" != "${expected}" ]]; then
|
||||
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# Retry a download until we get it. Takes a hash and a set of URLs.
|
||||
#
|
||||
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
|
||||
# $2+ are the URLs to download.
|
||||
function download-or-bust {
|
||||
local -r hash="$1"
|
||||
shift 1
|
||||
|
||||
local -r urls=( $* )
|
||||
while true; do
|
||||
for url in "${urls[@]}"; do
|
||||
local file="${url##*/}"
|
||||
rm -f "${file}"
|
||||
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 ${CURL_RETRY_CONNREFUSED} "${url}"; then
|
||||
echo "== Failed to download ${url}. Retrying. =="
|
||||
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
|
||||
echo "== Hash validation of ${url} failed. Retrying. =="
|
||||
else
|
||||
if [[ -n "${hash}" ]]; then
|
||||
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
|
||||
else
|
||||
echo "== Downloaded ${url} =="
|
||||
fi
|
||||
return
|
||||
fi
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
function split-commas {
|
||||
echo $1 | tr "," "\n"
|
||||
}
|
||||
|
||||
# Downloads kubernetes binaries and kube-system manifest tarball, unpacks them,
|
||||
# and places them into suitable directories. Files are placed in /opt/kubernetes.
|
||||
function install-kube-binary-config {
|
||||
cd "${KUBE_HOME}"
|
||||
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
|
||||
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
|
||||
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
|
||||
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
|
||||
else
|
||||
echo "Downloading binary release sha1 (not found in env)"
|
||||
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
|
||||
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
|
||||
fi
|
||||
echo "Downloading binary release tar"
|
||||
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
|
||||
tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite
|
||||
# Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files.
|
||||
src_dir="${KUBE_HOME}/kubernetes/server/bin"
|
||||
dst_dir="${KUBE_HOME}/kube-docker-files"
|
||||
mkdir -p "${dst_dir}"
|
||||
cp "${src_dir}/"*.docker_tag "${dst_dir}"
|
||||
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
|
||||
cp "${src_dir}/kube-proxy.tar" "${dst_dir}"
|
||||
else
|
||||
cp "${src_dir}/kube-apiserver.tar" "${dst_dir}"
|
||||
cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}"
|
||||
cp "${src_dir}/kube-scheduler.tar" "${dst_dir}"
|
||||
cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}"
|
||||
fi
|
||||
local -r kube_bin="${KUBE_HOME}/bin"
|
||||
mv "${src_dir}/kubelet" "${kube_bin}"
|
||||
mv "${src_dir}/kubectl" "${kube_bin}"
|
||||
|
||||
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] || \
|
||||
[[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then
|
||||
local -r cni_version="v0.6.0"
|
||||
local -r cni_tar="cni-plugins-amd64-${cni_version}.tgz"
|
||||
local -r cni_sha1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
|
||||
download-or-bust "${cni_sha1}" "https://storage.googleapis.com/kubernetes-release/network-plugins/${cni_tar}"
|
||||
local -r cni_dir="${KUBE_HOME}/cni"
|
||||
mkdir -p "${cni_dir}/bin"
|
||||
tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}/bin" --overwrite
|
||||
mv "${cni_dir}/bin"/* "${kube_bin}"
|
||||
rmdir "${cni_dir}/bin"
|
||||
rm -f "${KUBE_HOME}/${cni_tar}"
|
||||
fi
|
||||
|
||||
mv "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}"
|
||||
mv "${KUBE_HOME}/kubernetes/kubernetes-src.tar.gz" "${KUBE_HOME}"
|
||||
|
||||
# Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
|
||||
dst_dir="${KUBE_HOME}/kube-manifests"
|
||||
mkdir -p "${dst_dir}"
|
||||
local -r manifests_tar_urls=( $(split-commas "${KUBE_MANIFESTS_TAR_URL}") )
|
||||
local -r manifests_tar="${manifests_tar_urls[0]##*/}"
|
||||
if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then
|
||||
local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}"
|
||||
else
|
||||
echo "Downloading k8s manifests sha1 (not found in env)"
|
||||
download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
|
||||
local -r manifests_tar_hash=$(cat "${manifests_tar}.sha1")
|
||||
fi
|
||||
echo "Downloading k8s manifests tar"
|
||||
download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}"
|
||||
tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite
|
||||
local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}"
|
||||
if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then
|
||||
find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \
|
||||
xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
|
||||
find "${dst_dir}" -name \*.manifest -or -name \*.json | \
|
||||
xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@"
|
||||
fi
|
||||
cp "${dst_dir}/kubernetes/gci-trusty/container-linux-configure-helper.sh" "${KUBE_HOME}/bin/configure-helper.sh"
|
||||
chmod -R 755 "${kube_bin}"
|
||||
|
||||
# Clean up.
|
||||
rm -rf "${KUBE_HOME}/kubernetes"
|
||||
rm -f "${KUBE_HOME}/${server_binary_tar}"
|
||||
rm -f "${KUBE_HOME}/${server_binary_tar}.sha1"
|
||||
rm -f "${KUBE_HOME}/${manifests_tar}"
|
||||
rm -f "${KUBE_HOME}/${manifests_tar}.sha1"
|
||||
}
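# (Added illustration, not part of the original script.) The registry rewrite
# above works on manifest lines as follows, assuming a hypothetical private
# registry:
kube_addon_registry="registry.example.com"
echo '  image: gcr.io/google_containers/kube-proxy:v1.8.7' \
  | sed -r "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
# ->   image: registry.example.com/kube-proxy:v1.8.7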
|
||||
|
||||
######### Main Function ##########
|
||||
echo "Start to install kubernetes files"
|
||||
KUBE_HOME="/opt/kubernetes"
|
||||
mkdir -p "${KUBE_HOME}"
|
||||
download-kube-env
|
||||
source "${KUBE_HOME}/kube-env"
|
||||
install-kube-binary-config
|
||||
echo "Done for installing kubernetes files"
|
||||
|
||||
# On Container Linux, the hosts file is in /usr/share/baselayout/hosts
|
||||
# So we need to manually populate the hosts file here on gce.
|
||||
echo "127.0.0.1 localhost" >> /etc/hosts
|
||||
echo "::1 localhost" >> /etc/hosts
|
||||
|
||||
echo "Configuring hostname"
|
||||
hostnamectl set-hostname $(hostname | cut -f1 -d.)
|
@@ -1,83 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is for master and node instance health monitoring, which is
|
||||
# packed in kube-manifest tarball. It is executed through a systemd service
|
||||
# in cluster/gce/gci/<master/node>.yaml. The env variables come from an env
|
||||
# file provided by the systemd service.
|
||||
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# We simply kill the process when there is a failure. Another systemd service will
|
||||
# automatically restart the process.
|
||||
function docker_monitoring {
|
||||
while [ 1 ]; do
|
||||
if ! timeout 60 docker ps > /dev/null; then
|
||||
echo "Docker daemon failed!"
|
||||
pkill docker
|
||||
# Wait for a while, as we don't want to kill it again before it is really up.
|
||||
sleep 30
|
||||
else
|
||||
sleep "${SLEEP_SECONDS}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
function kubelet_monitoring {
|
||||
echo "Wait for 2 minutes for kubelet to be fuctional"
|
||||
# TODO(andyzheng0831): replace it with a more reliable method if possible.
|
||||
sleep 120
|
||||
local -r max_seconds=10
|
||||
local output=""
|
||||
while [ 1 ]; do
|
||||
if ! output=$(curl --insecure -m "${max_seconds}" -f -s -S https://127.0.0.1:${KUBELET_PORT:-10250}/healthz 2>&1); then
|
||||
# Print the response and/or errors.
|
||||
echo "${output}"
|
||||
echo "Kubelet is unhealthy!"
|
||||
pkill kubelet
|
||||
# Wait for a while, as we don't want to kill it again before it is really up.
|
||||
sleep 60
|
||||
else
|
||||
sleep "${SLEEP_SECONDS}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
############## Main Function ################
|
||||
if [[ "$#" -ne 1 ]]; then
|
||||
echo "Usage: health-monitor.sh <docker/kubelet>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
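# Illustrative examples (added for clarity, not part of the original script)
# of how the systemd service is expected to invoke this monitor:
#   ./health-monitor.sh docker
#   ./health-monitor.sh kubelet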
KUBE_ENV="/home/kubernetes/kube-env"
|
||||
if [[ ! -e "${KUBE_ENV}" ]]; then
|
||||
echo "The ${KUBE_ENV} file does not exist!! Terminate health monitoring"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SLEEP_SECONDS=10
|
||||
component=$1
|
||||
echo "Start kubernetes health monitoring for ${component}"
|
||||
source "${KUBE_ENV}"
|
||||
if [[ "${component}" == "docker" ]]; then
|
||||
docker_monitoring
|
||||
elif [[ "${component}" == "kubelet" ]]; then
|
||||
kubelet_monitoring
|
||||
else
|
||||
echo "Health monitoring for component "${component}" is not supported!"
|
||||
fi
|
@ -1,19 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# A library of helper functions and constants for the Container Linux distro.
|
||||
|
||||
# This file intentionally left blank
|
@ -1,139 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# A library of helper functions and constants for the Container Linux distro.
|
||||
source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh"
|
||||
|
||||
# create-master-instance creates the master instance. If called with
|
||||
# an argument, the argument is used as the name to a reserved IP
|
||||
# address for the master. (In the case of upgrade/repair, we re-use
|
||||
# the same IP.)
|
||||
#
|
||||
# It requires a whole slew of assumed variables, partially due to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
# variables are set:
|
||||
# ensure-temp-dir
|
||||
# detect-project
|
||||
# get-bearer-token
|
||||
function create-master-instance {
|
||||
local address=""
|
||||
[[ -n ${1:-} ]] && address="${1}"
|
||||
|
||||
write-master-env
|
||||
create-master-instance-internal "${MASTER_NAME}" "${address}"
|
||||
}
|
||||
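# Usage sketch (illustrative, not part of the original helper): the calls
# listed above are expected to have run before create-master-instance, e.g.
#   ensure-temp-dir
#   detect-project
#   get-bearer-token
#   create-master-instance "${MASTER_RESERVED_IP:-}"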
|
||||
function replicate-master-instance() {
|
||||
local existing_master_zone="${1}"
|
||||
local existing_master_name="${2}"
|
||||
local existing_master_replicas="${3}"
|
||||
|
||||
local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)"
|
||||
# Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering.
|
||||
kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")"
|
||||
kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")"
|
||||
ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")"
|
||||
ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")"
|
||||
|
||||
create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}"
|
||||
|
||||
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")"
|
||||
kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")"
|
||||
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")"
|
||||
kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")"
|
||||
|
||||
echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml
|
||||
get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt"
|
||||
|
||||
create-master-instance-internal "${REPLICA_NAME}"
|
||||
}
|
||||
|
||||
|
||||
function create-master-instance-internal() {
|
||||
local gcloud="gcloud"
|
||||
local retries=5
|
||||
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
|
||||
gcloud="gcloud beta"
|
||||
fi
|
||||
|
||||
local -r master_name="${1}"
|
||||
local -r address="${2:-}"
|
||||
|
||||
local preemptible_master=""
|
||||
if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
|
||||
preemptible_master="--preemptible --maintenance-policy TERMINATE"
|
||||
fi
|
||||
|
||||
local network=$(make-gcloud-network-argument \
|
||||
"${NETWORK_PROJECT}" "${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \
|
||||
"${address:-}" "${ENABLE_IP_ALIASES:-}" "${IP_ALIAS_SIZE:-}")
|
||||
|
||||
local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
|
||||
metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/container-linux/master.yaml"
|
||||
metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh"
|
||||
metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt"
|
||||
|
||||
local disk="name=${master_name}-pd"
|
||||
disk="${disk},device-name=master-pd"
|
||||
disk="${disk},mode=rw"
|
||||
disk="${disk},boot=no"
|
||||
disk="${disk},auto-delete=no"
|
||||
|
||||
for attempt in $(seq 1 ${retries}); do
|
||||
if result=$(${gcloud} compute instances create "${master_name}" \
|
||||
--project "${PROJECT}" \
|
||||
--zone "${ZONE}" \
|
||||
--machine-type "${MASTER_SIZE}" \
|
||||
--image-project="${MASTER_IMAGE_PROJECT}" \
|
||||
--image "${MASTER_IMAGE}" \
|
||||
--tags "${MASTER_TAG}" \
|
||||
--scopes "storage-ro,compute-rw,monitoring,logging-write" \
|
||||
--metadata-from-file "${metadata}" \
|
||||
--disk "${disk}" \
|
||||
--boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
|
||||
${MASTER_MIN_CPU_ARCHITECTURE:+"--min-cpu-platform=${MASTER_MIN_CPU_ARCHITECTURE}"} \
|
||||
${preemptible_master} \
|
||||
${network} 2>&1); then
|
||||
echo "${result}" >&2
|
||||
return 0
|
||||
else
|
||||
echo "${result}" >&2
|
||||
if [[ ! "${result}" =~ "try again later" ]]; then
|
||||
echo "Failed to create master instance due to non-retryable error" >&2
|
||||
return 1
|
||||
fi
|
||||
sleep 10
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Failed to create master instance despite ${retries} attempts" >&2
|
||||
return 1
|
||||
}
|
||||
|
||||
function get-metadata() {
|
||||
local zone="${1}"
|
||||
local name="${2}"
|
||||
local key="${3}"
|
||||
|
||||
local metadata_url="http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}"
|
||||
|
||||
gcloud compute ssh "${name}" \
|
||||
--project "${PROJECT}" \
|
||||
--zone "${zone}" \
|
||||
--command "curl '${metadata_url}' -H 'Metadata-Flavor: Google'" 2>/dev/null
|
||||
}
|
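# Usage sketch (illustrative): get-metadata fetches a single metadata value
# from an existing instance over SSH, as done in replicate-master-instance:
#   existing_env="$(get-metadata "${ZONE}" "${MASTER_NAME}" kube-env)"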
@ -1,57 +0,0 @@
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
update:
|
||||
reboot-strategy: off
|
||||
units:
|
||||
- name: locksmithd.service
|
||||
mask: true
|
||||
- name: kube-master-installation.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Download and install k8s binaries and configurations
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin
|
||||
# Use --retry-connrefused opt only if it's supported by curl.
|
||||
ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh'
|
||||
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh
|
||||
ExecStart=/opt/kubernetes/bin/configure.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
- name: kube-master-configuration.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Configure kubernetes master
|
||||
After=kube-master-installation.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh
|
||||
ExecStart=/opt/kubernetes/bin/configure-helper.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
- name: kubernetes.target
|
||||
enable: true
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: docker.service
|
||||
drop-ins:
|
||||
- name: "use-cgroupfs-driver.conf"
|
||||
# This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl
|
||||
content: |
|
||||
[Service]
|
||||
Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver="
|
@ -1,35 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# A library of helper functions and constants for the Container Linux distro.
|
||||
source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh"
|
||||
|
||||
function get-node-instance-metadata {
|
||||
local metadata=""
|
||||
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
|
||||
metadata+="user-data=${KUBE_ROOT}/cluster/gce/container-linux/node.yaml,"
|
||||
metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh,"
|
||||
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt"
|
||||
echo "${metadata}"
|
||||
}
|
||||
|
||||
# $1: template name (required).
|
||||
function create-node-instance-template {
|
||||
local template_name="$1"
|
||||
|
||||
create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)"
|
||||
# TODO(euank): We should include update-strategy here. We should also switch to ignition
|
||||
}
|
@ -1,57 +0,0 @@
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
update:
|
||||
reboot-strategy: off
|
||||
units:
|
||||
- name: locksmithd.service
|
||||
mask: true
|
||||
- name: kube-node-installation.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Download and install k8s binaries and configurations
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin
|
||||
# Use --retry-connrefused opt only if it's supported by curl.
|
||||
ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh'
|
||||
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh
|
||||
ExecStart=/opt/kubernetes/bin/configure.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
- name: kube-node-configuration.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Configure kubernetes node
|
||||
After=kube-node-installation.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh
|
||||
ExecStart=/opt/kubernetes/bin/configure-helper.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
- name: kubernetes.target
|
||||
enable: true
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: docker.service
|
||||
drop-ins:
|
||||
- name: "use-cgroupfs-driver.conf"
|
||||
# This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl
|
||||
content: |
|
||||
[Service]
|
||||
Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver="
|
@ -1,32 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# A library of helper functions and constants for the Debian OS distro.
|
||||
|
||||
function get-node-instance-metadata {
|
||||
local metadata=""
|
||||
metadata+="startup-script=${KUBE_TEMP}/configure-vm.sh,"
|
||||
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
|
||||
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt"
|
||||
echo "${metadata}"
|
||||
}
|
||||
|
||||
# $1: template name (required)
|
||||
function create-node-instance-template {
|
||||
local template_name="$1"
|
||||
prepare-startup-script
|
||||
create-node-template "$template_name" "${scope_flags}" "$(get-node-instance-metadata)"
|
||||
}
|
@ -77,7 +77,9 @@ function config-ip-firewall {
|
||||
iptables -w -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE
|
||||
fi
|
||||
|
||||
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
|
||||
# If METADATA_CONCEALMENT_NO_FIREWALL is set, don't create a firewall on this
|
||||
# node because we don't expect the daemonset to run on this node.
|
||||
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]] && [[ ! "${METADATA_CONCEALMENT_NO_FIREWALL:-}" == "true" ]]; then
|
||||
echo "Add rule for metadata concealment"
|
||||
iptables -w -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988
|
||||
fi
|
||||
@ -1334,7 +1336,7 @@ function prepare-kube-proxy-manifest-variables {
|
||||
function start-kube-proxy {
|
||||
echo "Start kube-proxy static pod"
|
||||
prepare-log-file /var/log/kube-proxy.log
|
||||
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest"
|
||||
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-proxy.manifest"
|
||||
prepare-kube-proxy-manifest-variables "${src_file}"
|
||||
|
||||
cp "${src_file}" /etc/kubernetes/manifests
|
||||
@ -2077,7 +2079,7 @@ EOF
|
||||
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${kubedns_file}"
|
||||
|
||||
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
|
||||
setup-addon-manifests "addons" "dns-horizontal-autoscaler"
|
||||
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
|
||||
fi
|
||||
}
|
||||
|
||||
@ -2225,7 +2227,7 @@ EOF
|
||||
setup-addon-manifests "addons" "node-problem-detector/standalone" "node-problem-detector"
|
||||
fi
|
||||
if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
|
||||
setup-addon-manifests "admission-controls" "limit-range"
|
||||
setup-addon-manifests "admission-controls" "limit-range" "gce"
|
||||
fi
|
||||
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
|
||||
setup-addon-manifests "addons" "calico-policy-controller"
|
||||
@ -2260,7 +2262,7 @@ function start-image-puller {
|
||||
# Starts kube-registry proxy
|
||||
function start-kube-registry-proxy {
|
||||
echo "Start kube-registry-proxy"
|
||||
cp "${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml" /etc/kubernetes/manifests
|
||||
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-registry-proxy.yaml" /etc/kubernetes/manifests
|
||||
}
|
||||
|
||||
# Starts a l7 loadbalancing controller for ingress.
|
||||
|
@ -14,24 +14,14 @@
|
||||
{% set srv_sshproxy_path = "/srv/sshproxy" -%}
|
||||
|
||||
{% if grains.cloud is defined -%}
|
||||
{% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
|
||||
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
|
||||
{% if grains.cloud == 'gce' and grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
|
||||
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
|
||||
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.cloud in ['openstack'] -%}
|
||||
{% set cloud_config_mount = "{\"name\": \"instanceid\",\"mountPath\": \"/var/lib/cloud/data/instance-id\",\"readOnly\": true}," -%}
|
||||
{% set cloud_config_volume = "{\"name\": \"instanceid\",\"hostPath\": {\"path\": \"/var/lib/cloud/data/instance-id\"}}," -%}
|
||||
{% endif -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set advertise_address = "" -%}
|
||||
@ -99,7 +89,7 @@
|
||||
{% set client_ca_file = "" -%}
|
||||
|
||||
{% set secure_port = "6443" -%}
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
|
||||
{% if grains['cloud'] is defined and grains.cloud == 'gce' %}
|
||||
{% set secure_port = "443" -%}
|
||||
{% set client_ca_file = "--client-ca-file=/srv/kubernetes/ca.crt" -%}
|
||||
{% endif -%}
|
||||
@ -113,7 +103,7 @@
|
||||
{% set basic_auth_file = "" -%}
|
||||
{% set authz_mode = "" -%}
|
||||
{% set abac_policy_file = "" -%}
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
|
||||
{% if grains['cloud'] is defined and grains.cloud == 'gce' %}
|
||||
{% set token_auth_file = " --token-auth-file=/srv/kubernetes/known_tokens.csv" -%}
|
||||
{% set basic_auth_file = " --basic-auth-file=/srv/kubernetes/basic_auth.csv" -%}
|
||||
{% set authz_mode = " --authorization-mode=ABAC" -%}
|
@ -40,30 +40,20 @@
|
||||
{% flex_vol_plugin_dir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec" -%}
|
||||
|
||||
{% if grains.cloud is defined -%}
|
||||
{% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
|
||||
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
|
||||
{% endif -%}
|
||||
{% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%}
|
||||
|
||||
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
|
||||
{% if grains.cloud == 'gce' and grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
|
||||
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
|
||||
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.cloud in ['openstack'] -%}
|
||||
{% set cloud_config_mount = "{\"name\": \"instanceid\",\"mountPath\": \"/var/lib/cloud/data/instance-id\",\"readOnly\": true}," -%}
|
||||
{% set cloud_config_volume = "{\"name\": \"instanceid\",\"hostPath\": {\"path\": \"/var/lib/cloud/data/instance-id\"}}," -%}
|
||||
{% endif -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set root_ca_file = "" -%}
|
||||
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] %}
|
||||
{% if grains.cloud is defined and grains.cloud == 'gce' %}
|
||||
{% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
|
||||
{% endif -%}
|
||||
|
@ -8,7 +8,7 @@
|
||||
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
|
||||
{% set api_servers = "--master=https://" + ips[0][0] -%}
|
||||
{% endif -%}
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy' ] %}
|
||||
{% if grains['cloud'] is defined and grains.cloud == 'gce' %}
|
||||
{% set api_servers_with_port = api_servers -%}
|
||||
{% else -%}
|
||||
{% set api_servers_with_port = api_servers + ":6443" -%}
|
@ -25,14 +25,14 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
|
||||
source "${KUBE_ROOT}/cluster/common.sh"
|
||||
source "${KUBE_ROOT}/hack/lib/util.sh"
|
||||
|
||||
if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "container-linux" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
|
||||
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
|
||||
source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh"
|
||||
else
|
||||
echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
|
||||
if [[ "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
|
||||
source "${KUBE_ROOT}/cluster/gce/${MASTER_OS_DISTRIBUTION}/master-helper.sh"
|
||||
else
|
||||
echo "Cannot operate on cluster using master os distro: ${MASTER_OS_DISTRIBUTION}" >&2
|
||||
@ -255,21 +255,16 @@ fi
|
||||
# Assumed vars:
|
||||
# PROJECT
|
||||
# SERVER_BINARY_TAR
|
||||
# SALT_TAR
|
||||
# KUBE_MANIFESTS_TAR
|
||||
# ZONE
|
||||
# Vars set:
|
||||
# SERVER_BINARY_TAR_URL
|
||||
# SERVER_BINARY_TAR_HASH
|
||||
# SALT_TAR_URL
|
||||
# SALT_TAR_HASH
|
||||
# KUBE_MANIFESTS_TAR_URL
|
||||
# KUBE_MANIFESTS_TAR_HASH
|
||||
function upload-server-tars() {
|
||||
SERVER_BINARY_TAR_URL=
|
||||
SERVER_BINARY_TAR_HASH=
|
||||
SALT_TAR_URL=
|
||||
SALT_TAR_HASH=
|
||||
KUBE_MANIFESTS_TAR_URL=
|
||||
KUBE_MANIFESTS_TAR_HASH=
|
||||
|
||||
@ -287,13 +282,11 @@ function upload-server-tars() {
|
||||
set-preferred-region
|
||||
|
||||
SERVER_BINARY_TAR_HASH=$(sha1sum-file "${SERVER_BINARY_TAR}")
|
||||
SALT_TAR_HASH=$(sha1sum-file "${SALT_TAR}")
|
||||
if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
|
||||
KUBE_MANIFESTS_TAR_HASH=$(sha1sum-file "${KUBE_MANIFESTS_TAR}")
|
||||
fi
|
||||
|
||||
local server_binary_tar_urls=()
|
||||
local salt_tar_urls=()
|
||||
local kube_manifest_tar_urls=()
|
||||
|
||||
for region in "${PREFERRED_REGION[@]}"; do
|
||||
@ -313,13 +306,10 @@ function upload-server-tars() {
|
||||
|
||||
echo "+++ Staging server tars to Google Storage: ${staging_path}"
|
||||
local server_binary_gs_url="${staging_path}/${SERVER_BINARY_TAR##*/}"
|
||||
local salt_gs_url="${staging_path}/${SALT_TAR##*/}"
|
||||
copy-to-staging "${staging_path}" "${server_binary_gs_url}" "${SERVER_BINARY_TAR}" "${SERVER_BINARY_TAR_HASH}"
|
||||
copy-to-staging "${staging_path}" "${salt_gs_url}" "${SALT_TAR}" "${SALT_TAR_HASH}"
|
||||
|
||||
# Convert from gs:// URL to an https:// URL
|
||||
server_binary_tar_urls+=("${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}")
|
||||
salt_tar_urls+=("${salt_gs_url/gs:\/\//https://storage.googleapis.com/}")
|
||||
if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
|
||||
local kube_manifests_gs_url="${staging_path}/${KUBE_MANIFESTS_TAR##*/}"
|
||||
copy-to-staging "${staging_path}" "${kube_manifests_gs_url}" "${KUBE_MANIFESTS_TAR}" "${KUBE_MANIFESTS_TAR_HASH}"
|
||||
@ -329,7 +319,6 @@ function upload-server-tars() {
|
||||
done
|
||||
|
||||
SERVER_BINARY_TAR_URL=$(join_csv "${server_binary_tar_urls[@]}")
|
||||
SALT_TAR_URL=$(join_csv "${salt_tar_urls[@]}")
|
||||
if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
|
||||
KUBE_MANIFESTS_TAR_URL=$(join_csv "${kube_manifests_tar_urls[@]}")
|
||||
fi
|
||||
@ -2065,66 +2054,6 @@ function prepare-push() {
|
||||
fi
|
||||
}
|
||||
|
||||
# Push binaries to kubernetes master
|
||||
function push-master() {
|
||||
echo "Updating master metadata ..."
|
||||
write-master-env
|
||||
prepare-startup-script
|
||||
add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml" "startup-script=${KUBE_TEMP}/configure-vm.sh"
|
||||
|
||||
echo "Pushing to master (log at ${OUTPUT}/push-${KUBE_MASTER}.log) ..."
|
||||
cat ${KUBE_TEMP}/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${KUBE_MASTER}" --command "sudo bash -s -- --push" &> ${OUTPUT}/push-"${KUBE_MASTER}".log
|
||||
}
|
||||
|
||||
# Push binaries to kubernetes node
|
||||
function push-node() {
|
||||
node=${1}
|
||||
|
||||
echo "Updating node ${node} metadata... "
|
||||
prepare-startup-script
|
||||
add-instance-metadata-from-file "${node}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" "startup-script=${KUBE_TEMP}/configure-vm.sh"
|
||||
|
||||
echo "Start upgrading node ${node} (log at ${OUTPUT}/push-${node}.log) ..."
|
||||
cat ${KUBE_TEMP}/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${node}" --command "sudo bash -s -- --push" &> ${OUTPUT}/push-"${node}".log
|
||||
}
|
||||
|
||||
# Push binaries to kubernetes cluster
|
||||
function kube-push() {
|
||||
# Disable this until it's fixed.
|
||||
# See https://github.com/kubernetes/kubernetes/issues/17397
|
||||
echo "./cluster/kube-push.sh is currently not supported in GCE."
|
||||
echo "Please use ./cluster/gce/upgrade.sh."
|
||||
exit 1
|
||||
|
||||
prepare-push true
|
||||
|
||||
push-master
|
||||
|
||||
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
|
||||
push-node "${NODE_NAMES[$i]}" &
|
||||
done
|
||||
|
||||
kube::util::wait-for-jobs || {
|
||||
echo -e "${color_red}Some commands failed.${color_norm}" >&2
|
||||
}
|
||||
|
||||
# TODO(zmerlynn): Re-create instance-template with the new
|
||||
# node-kube-env. This isn't important until the node-ip-range issue
|
||||
# is solved (because that's blocking automatic dynamic nodes from
|
||||
# working). The node-kube-env has to be composed with the KUBELET_TOKEN
|
||||
# and KUBE_PROXY_TOKEN. Ideally we would have
|
||||
# http://issue.k8s.io/3168
|
||||
# implemented before then, though, so avoiding this mess until then.
|
||||
|
||||
echo
|
||||
echo "Kubernetes cluster is running. The master is running at:"
|
||||
echo
|
||||
echo " https://${KUBE_MASTER_IP}"
|
||||
echo
|
||||
echo "The user name and password to use is located in ~/.kube/config"
|
||||
echo
|
||||
}
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Cluster specific test helpers used from hack/e2e.go
|
||||
|
||||
@ -2233,12 +2162,3 @@ function ssh-to-node() {
|
||||
function prepare-e2e() {
|
||||
detect-project
|
||||
}
|
||||
|
||||
# Writes configure-vm.sh to a temporary location with comments stripped. GCE
|
||||
# limits the size of metadata fields to 32K, and stripping comments is the
|
||||
# easiest way to buy us a little more room.
|
||||
function prepare-startup-script() {
|
||||
# Find a standard sed instance (and ensure that the command works as expected on a Mac).
|
||||
kube::util::ensure-gnu-sed
|
||||
${SED} '/^\s*#\([^!].*\)*$/ d' ${KUBE_ROOT}/cluster/gce/configure-vm.sh > ${KUBE_TEMP}/configure-vm.sh
|
||||
}
|
||||
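# Note (illustrative, an assumption about the sed expression above): it
# deletes whole-line comments such as '  # Provision the master', but keeps
# the '#!/bin/bash' shebang because the pattern excludes lines starting
# with '#!'. Inline comments after code are left untouched.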
|
@ -24,14 +24,6 @@
|
||||
# Set KUBERNETES_PROVIDER to choose between different providers:
|
||||
# Google Compute Engine [default]
|
||||
# * export KUBERNETES_PROVIDER=gce; wget -q -O - https://get.k8s.io | bash
|
||||
# Libvirt (with CoreOS as a guest operating system)
|
||||
# * export KUBERNETES_PROVIDER=libvirt-coreos; wget -q -O - https://get.k8s.io | bash
|
||||
# Vagrant (local virtual machines)
|
||||
# * export KUBERNETES_PROVIDER=vagrant; wget -q -O - https://get.k8s.io | bash
|
||||
# VMWare Photon Controller
|
||||
# * export KUBERNETES_PROVIDER=photon-controller; wget -q -O - https://get.k8s.io | bash
|
||||
# OpenStack-Heat
|
||||
# * export KUBERNETES_PROVIDER=openstack-heat; wget -q -O - https://get.k8s.io | bash
|
||||
#
|
||||
# Set KUBERNETES_RELEASE to choose a specific release instead of the current
|
||||
# stable release, (e.g. 'v1.3.7').
|
||||
|
@ -24,7 +24,7 @@ import string
|
||||
import json
|
||||
import ipaddress
|
||||
|
||||
import charms.leadership
|
||||
from charms.leadership import leader_get, leader_set
|
||||
|
||||
from shutil import move
|
||||
|
||||
@ -112,6 +112,7 @@ def check_for_upgrade_needed():
|
||||
# we take no risk and forcibly upgrade the snaps.
|
||||
# Forcibly means we do not prompt the user to call the upgrade action.
|
||||
set_upgrade_needed(forced=True)
|
||||
upgrade_for_etcd()
|
||||
|
||||
|
||||
def snap_resources_changed():
|
||||
@ -137,6 +138,14 @@ def snap_resources_changed():
|
||||
return 'unknown'
|
||||
|
||||
|
||||
def upgrade_for_etcd():
|
||||
# we are upgrading the charm.
|
||||
# If this is an old deployment etcd_version is not set
|
||||
# so if we are the leader we need to set it to v2
|
||||
if not leader_get('etcd_version') and is_state('leadership.is_leader'):
|
||||
leader_set(etcd_version='etcd2')
|
||||
|
||||
|
||||
def add_rbac_roles():
|
||||
'''Update the known_tokens file with proper groups.'''
|
||||
|
||||
@ -316,7 +325,7 @@ def setup_leader_authentication():
|
||||
# path as a key.
|
||||
# eg:
|
||||
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
|
||||
charms.leadership.leader_set(leader_data)
|
||||
leader_set(leader_data)
|
||||
remove_state('kubernetes-master.components.started')
|
||||
set_state('authentication.setup')
|
||||
|
||||
@ -364,7 +373,7 @@ def get_keys_from_leader(keys, overwrite_local=False):
|
||||
# If the path does not exist, assume we need it
|
||||
if not os.path.exists(k) or overwrite_local:
|
||||
# Fetch data from leadership broadcast
|
||||
contents = charms.leadership.leader_get(k)
|
||||
contents = leader_get(k)
|
||||
# Default to logging the warning and wait for leader data to be set
|
||||
if contents is None:
|
||||
msg = "Waiting on leaders crypto keys."
|
||||
@ -423,6 +432,7 @@ def master_services_down():
|
||||
|
||||
@when('etcd.available', 'tls_client.server.certificate.saved',
|
||||
'authentication.setup')
|
||||
@when('leadership.set.etcd_version')
|
||||
@when_not('kubernetes-master.components.started')
|
||||
def start_master(etcd):
|
||||
'''Run the Kubernetes master components.'''
|
||||
@ -440,7 +450,8 @@ def start_master(etcd):
|
||||
handle_etcd_relation(etcd)
|
||||
|
||||
# Add CLI options to all components
|
||||
configure_apiserver(etcd)
|
||||
leader_etcd_version = leader_get('etcd_version')
|
||||
configure_apiserver(etcd.get_connection_string(), leader_etcd_version)
|
||||
configure_controller_manager()
|
||||
configure_scheduler()
|
||||
set_state('kubernetes-master.components.started')
|
||||
@ -462,6 +473,14 @@ def etcd_data_change(etcd):
|
||||
if data_changed('etcd-connect', connection_string):
|
||||
remove_state('kubernetes-master.components.started')
|
||||
|
||||
# We are the leader and the etcd_version is not set meaning
|
||||
# this is the first time we connect to etcd.
|
||||
if is_state('leadership.is_leader') and not leader_get('etcd_version'):
|
||||
if etcd.get_version().startswith('3.'):
|
||||
leader_set(etcd_version='etcd3')
|
||||
else:
|
||||
leader_set(etcd_version='etcd2')
|
||||
|
||||
|
||||
@when('kube-control.connected')
|
||||
@when('cdk-addons.configured')
|
||||
@ -816,9 +835,11 @@ def on_config_allow_privileged_change():
|
||||
|
||||
@when('config.changed.api-extra-args')
|
||||
@when('kubernetes-master.components.started')
|
||||
@when('leadership.set.etcd_version')
|
||||
@when('etcd.available')
|
||||
def on_config_api_extra_args_change(etcd):
|
||||
configure_apiserver(etcd)
|
||||
configure_apiserver(etcd.get_connection_string(),
|
||||
leader_get('etcd_version'))
|
||||
|
||||
|
||||
@when('config.changed.controller-manager-extra-args')
|
||||
@ -1045,7 +1066,7 @@ def configure_kubernetes_service(service, base_args, extra_args_key):
|
||||
db.set(prev_args_key, args)
|
||||
|
||||
|
||||
def configure_apiserver(etcd):
|
||||
def configure_apiserver(etcd_connection_string, leader_etcd_version):
|
||||
api_opts = {}
|
||||
|
||||
# Get the tls paths from the layer data.
|
||||
@ -1075,8 +1096,7 @@ def configure_apiserver(etcd):
|
||||
api_opts['logtostderr'] = 'true'
|
||||
api_opts['insecure-bind-address'] = '127.0.0.1'
|
||||
api_opts['insecure-port'] = '8080'
|
||||
api_opts['storage-backend'] = 'etcd2' # FIXME: add etcd3 support
|
||||
|
||||
api_opts['storage-backend'] = leader_etcd_version
|
||||
api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv'
|
||||
api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv'
|
||||
api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key'
|
||||
@ -1089,7 +1109,7 @@ def configure_apiserver(etcd):
|
||||
api_opts['etcd-cafile'] = etcd_ca
|
||||
api_opts['etcd-keyfile'] = etcd_key
|
||||
api_opts['etcd-certfile'] = etcd_cert
|
||||
api_opts['etcd-servers'] = etcd.get_connection_string()
|
||||
api_opts['etcd-servers'] = etcd_connection_string
|
||||
|
||||
admission_control = [
|
||||
'Initializers',
|
||||
|
@ -1,96 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Push a new release to the cluster.
|
||||
#
|
||||
# This will find the release tar, cause it to be downloaded, unpacked, installed
|
||||
# and enacted.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
echo "kube-push.sh is currently broken; see https://github.com/kubernetes/kubernetes/issues/17397"
|
||||
exit 1
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
if [ -f "${KUBE_ROOT}/cluster/env.sh" ]; then
|
||||
source "${KUBE_ROOT}/cluster/env.sh"
|
||||
fi
|
||||
|
||||
source "${KUBE_ROOT}/cluster/kube-util.sh"
|
||||
|
||||
function usage() {
|
||||
echo "${0} [-m|-n <node id>] <version>"
|
||||
echo " Updates Kubernetes binaries. Can be done for all components (by default), master(-m) or specified node(-n)."
|
||||
echo " If the version is not specified will try to use local binaries."
|
||||
echo " Warning: upgrading single node is experimental"
|
||||
}
|
||||
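# Examples (illustrative, not part of the original script):
#   ./cluster/kube-push.sh v1.8.7                        # update all components
#   ./cluster/kube-push.sh -m v1.8.7                     # update only the master
#   ./cluster/kube-push.sh -n kubernetes-node-1 v1.8.7   # update a single node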
|
||||
push_to_master=false
|
||||
push_to_node=false
|
||||
|
||||
while getopts "mn:h" opt; do
|
||||
case ${opt} in
|
||||
m)
|
||||
push_to_master=true;;
|
||||
n)
|
||||
push_to_node=true
|
||||
node_id="$OPTARG";;
|
||||
h)
|
||||
usage
|
||||
exit 0;;
|
||||
\?)
|
||||
echo "Invalid option: -$OPTARG" >&2
|
||||
usage
|
||||
exit 1;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND-1))
|
||||
|
||||
if [[ "${push_to_master}" == "true" ]] && [[ "${push_to_node}" == "true" ]]; then
|
||||
echo "Only one of options -m -n should be specified"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
verify-prereqs
|
||||
verify-kube-binaries
|
||||
KUBE_VERSION=${1-}
|
||||
|
||||
if [[ "${push_to_master}" == "false" ]] && [[ "${push_to_node}" == "false" ]]; then
|
||||
echo "Updating cluster using provider: $KUBERNETES_PROVIDER"
|
||||
kube-push
|
||||
fi
|
||||
|
||||
if [[ "${push_to_master}" == "true" ]]; then
|
||||
echo "Updating master to version ${KUBE_VERSION:-"dev"}"
|
||||
prepare-push false
|
||||
push-master
|
||||
fi
|
||||
|
||||
if [[ "${push_to_node}" == "true" ]]; then
|
||||
echo "Updating node $node_id to version ${KUBE_VERSION:-"dev"}"
|
||||
prepare-push true
|
||||
push-node $node_id
|
||||
fi
|
||||
|
||||
echo "Validating cluster post-push..."
|
||||
|
||||
"${KUBE_ROOT}/cluster/validate-cluster.sh"
|
||||
|
||||
echo "Done"
|
@ -34,13 +34,7 @@ source "${KUBE_ROOT}/cluster/kube-util.sh"
|
||||
|
||||
DEPRECATED_PROVIDERS=(
|
||||
"centos"
|
||||
"libvirt-coreos"
|
||||
"local"
|
||||
"openstack-heat"
|
||||
"photon-controller"
|
||||
"vagrant"
|
||||
"vsphere"
|
||||
"windows"
|
||||
)
|
||||
|
||||
for provider in "${DEPRECATED_PROVIDERS[@]}"; do
|
||||
|
@ -1,25 +0,0 @@
|
||||
sh_library(
|
||||
name = "lib",
|
||||
srcs = [
|
||||
"logging.sh",
|
||||
],
|
||||
visibility = [
|
||||
"//build/visible_to:COMMON_testing",
|
||||
"//build/visible_to:cluster",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = [
|
||||
"//build/visible_to:cluster",
|
||||
],
|
||||
)
|
@ -1,15 +0,0 @@
|
||||
# Configuration options
|
||||
|
||||
These options can be set as environment variables, to customize how your cluster is created.
|
||||
|
||||
These options apply across providers. There are additional documents for options specific to providers:
|
||||
|
||||
* [AWS](aws/options.md)
|
||||
|
||||
This is a work-in-progress; not all options are documented yet!
|
||||
|
||||
**NUM_NODES**
|
||||
|
||||
The number of node instances to create. Most providers default this to 4.
|
||||
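For example (illustrative), to bring up a six-node cluster on the default provider:

    export NUM_NODES=6
    ./cluster/kube-up.sh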
|
||||
|
@ -1,72 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
##########################################################
|
||||
#
|
||||
# These parameters describe objects we are using from
|
||||
# Photon Controller. They are all assumed to be pre-existing.
|
||||
#
|
||||
# Note: if you want help in creating them, you can use
|
||||
# the setup-prereq.sh script, which will create any of these
|
||||
# that do not already exist.
|
||||
#
|
||||
##########################################################
|
||||
|
||||
# Pre-created tenant for Kubernetes to use
|
||||
PHOTON_TENANT=kube-tenant
|
||||
|
||||
# Pre-created project in PHOTON_TENANT for Kubernetes to use
|
||||
PHOTON_PROJECT=kube-project
|
||||
|
||||
# Pre-created VM flavor for Kubernetes master to use
|
||||
# Can be same as master
|
||||
# We recommend at least 1GB of memory
|
||||
PHOTON_MASTER_FLAVOR=kube-vm
|
||||
|
||||
# Pre-created VM flavor for Kubernetes node to use
|
||||
# Can be same as master
|
||||
# We recommend at least 2GB of memory
|
||||
PHOTON_NODE_FLAVOR=kube-vm
|
||||
|
||||
# Pre-created disk flavor for Kubernetes to use
|
||||
PHOTON_DISK_FLAVOR=kube-disk
|
||||
|
||||
# Pre-created Debian 8 image with kube user uploaded to Photon Controller
|
||||
# Note: While Photon Controller allows multiple images to have the same
|
||||
# name, we assume that there is exactly one image with this name.
|
||||
PHOTON_IMAGE=kube
|
||||
|
||||
##########################################################
|
||||
#
|
||||
# Parameters just for the setup-prereq.sh script: not used
|
||||
# elsewhere. If you create the above objects by hand, you
|
||||
# do not need to edit these.
|
||||
#
|
||||
# Note that setup-prereq.sh also creates the objects
|
||||
# above.
|
||||
#
|
||||
##########################################################
|
||||
|
||||
# The specifications for the master and node flavors
|
||||
SETUP_MASTER_FLAVOR_SPEC="vm 1 COUNT, vm.cpu 1 COUNT, vm.memory 2 GB"
|
||||
SETUP_NODE_FLAVOR_SPEC=${SETUP_MASTER_FLAVOR_SPEC}
|
||||
|
||||
# The specification for the ephemeral disk flavor.
|
||||
SETUP_DISK_FLAVOR_SPEC="ephemeral-disk 1 COUNT"
|
||||
|
||||
# The specification for the tenant resource ticket and the project resources
|
||||
SETUP_TICKET_SPEC="vm.memory 1000 GB, vm 1000 COUNT"
|
||||
SETUP_PROJECT_SPEC="${SETUP_TICKET_SPEC}"
|
@ -1,94 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
##########################################################
|
||||
#
|
||||
# Common parameters for Kubernetes
|
||||
#
|
||||
##########################################################
|
||||
|
||||
# Default number of nodes to make. You can change this as needed
|
||||
NUM_NODES=3
|
||||
|
||||
# Range of IPs assigned to pods
|
||||
NODE_IP_RANGES="10.244.0.0/16"
|
||||
|
||||
# IPs used by Kubernetes master
|
||||
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
|
||||
|
||||
# Range of IPs assigned by Kubernetes to services
|
||||
SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20"
|
||||
|
||||
##########################################################
|
||||
#
|
||||
# Advanced parameters for Kubernetes
|
||||
#
|
||||
##########################################################
|
||||
|
||||
# The instance prefix is the beginning of the name given to each VM we create
|
||||
# If this is changed, you can have multiple kubernetes clusters per project
|
||||
# Note that even if you don't change it, each tenant/project can have its own
|
||||
# Kubernetes cluster
|
||||
INSTANCE_PREFIX=kubernetes
|
||||
|
||||
# Name of the user used to configure the VM
|
||||
# We use cloud-init to create the user
|
||||
VM_USER=kube
|
||||
|
||||
# SSH options for how we connect to the Kubernetes VMs
|
||||
# We set the user known hosts file to /dev/null because we are connecting to new VMs.
|
||||
# When working in an environment where there is a lot of VM churn, VM IP addresses
|
||||
# will be reused, and the ssh keys will be different. This prevents us from seeing error
|
||||
# due to this, and it will not save the SSH key to the known_hosts file, so users will
|
||||
# still have standard ssh security checks.
|
||||
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR -C"
|
||||
|
||||
# Optional: Enable node logging.
|
||||
# Note: currently untested
|
||||
ENABLE_NODE_LOGGING=false
|
||||
LOGGING_DESTINATION=elasticsearch
|
||||
|
||||
# Optional: When set to true, Elasticsearch and Kibana will be setup
|
||||
# Note: currently untested
|
||||
ENABLE_CLUSTER_LOGGING=false
|
||||
ELASTICSEARCH_LOGGING_REPLICAS=1
|
||||
|
||||
# Optional: Cluster monitoring to setup as part of the cluster bring up:
|
||||
# none - No cluster monitoring setup
|
||||
# influxdb - Heapster, InfluxDB, and Grafana
|
||||
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
|
||||
# Note: currently untested
|
||||
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
|
||||
|
||||
# Optional: Install cluster DNS.
|
||||
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
|
||||
DNS_SERVER_IP="10.244.240.240"
|
||||
DNS_DOMAIN="cluster.local"
|
||||
|
||||
# Optional: Enable DNS horizontal autoscaler
|
||||
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"
|
||||
|
||||
# Optional: Install Kubernetes UI
|
||||
ENABLE_CLUSTER_UI=true
|
||||
|
||||
# We need to configure subject alternate names (SANs) for the master's certificate
|
||||
# we generate. While users will connect via the external IP, pods (like the UI)
|
||||
# will connect via the cluster IP, from the SERVICE_CLUSTER_IP_RANGE.
|
||||
# In addition to the extra SANs here, we'll also add one for the service IP.
|
||||
MASTER_EXTRA_SANS="DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN}"
|
||||
|
||||
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
|
||||
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
|
@ -1,20 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
NUM_NODES=2
|
||||
NODE_IP_RANGES="10.244.0.0/16"
|
||||
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
|
||||
SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20"
|
@ -1,239 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This sets up a Photon Controller with the tenant, project, flavors
|
||||
# and image that are needed to deploy Kubernetes with kube-up.
|
||||
#
|
||||
# This is not meant to be used in production: it creates resource tickets
|
||||
# (quotas) that are arbitrary and not likely to work in your environment.
|
||||
# However, it may be a quick way to get your environment set up to try out
|
||||
# a Kubernetes installation.
|
||||
#
|
||||
# It uses the names for the tenant, project, and flavors as specified in the
|
||||
# config-common.sh file
|
||||
#
|
||||
# If you want to do this by hand, this script is equivalent to the following
|
||||
# Photon Controller commands (assuming you haven't edited config-common.sh
|
||||
# to change the names)
|
||||
#
|
||||
# photon target set https://192.0.2.2
|
||||
# photon tenant create kube-tenant
|
||||
# photon tenant set kube-tenant
|
||||
# photon resource-ticket create --tenant kube-tenant --name kube-resources --limits "vm.memory 1000 GB, vm 1000 COUNT"
|
||||
# photon project create --tenant kube-tenant --resource-ticket kube-resources --name kube-project --limits "vm.memory 1000 GB, vm 1000 COUNT"
|
||||
# photon project set kube-project
|
||||
# photon -n flavor create --name "kube-vm" --kind "vm" --cost "vm 1 COUNT, vm.cpu 1 COUNT, vm.memory 2 GB"
|
||||
# photon -n flavor create --name "kube-disk" --kind "ephemeral-disk" --cost "ephemeral-disk 1 COUNT"
|
||||
# photon image create kube.vmdk -n kube-image -i EAGER
|
||||
#
|
||||
# Note that the kube.vmdk can be downloaded as specified in the documentation.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
|
||||
# shellcheck source=./util.sh
|
||||
source "${KUBE_ROOT}/cluster/photon-controller/util.sh"
|
||||
|
||||
function main {
|
||||
verify-cmd-in-path photon
|
||||
set-target
|
||||
create-tenant
|
||||
create-project
|
||||
create-vm-flavor "${PHOTON_MASTER_FLAVOR}" "${SETUP_MASTER_FLAVOR_SPEC}"
|
||||
if [ "${PHOTON_MASTER_FLAVOR}" != "${PHOTON_NODE_FLAVOR}" ]; then
|
||||
create-vm-flavor "${PHOTON_NODE_FLAVOR}" "${SETUP_NODE_FLAVOR_SPEC}"
|
||||
fi
|
||||
create-disk-flavor
|
||||
create-image
|
||||
}
|
||||
|
||||
function parse-cmd-line {
|
||||
PHOTON_TARGET=${1:-""}
|
||||
PHOTON_VMDK=${2:-""}
|
||||
|
||||
if [[ "${PHOTON_TARGET}" = "" || "${PHOTON_VMDK}" = "" ]]; then
|
||||
echo "Usage: setup-prereq <photon target> <path-to-kube-vmdk>"
|
||||
echo "Target should be a URL like https://192.0.2.1"
|
||||
echo ""
|
||||
echo "This will create the following, based on the configuration in config-common.sh"
|
||||
echo " * A tenant named ${PHOTON_TENANT}"
|
||||
echo " * A project named ${PHOTON_PROJECT}"
|
||||
echo " * A VM flavor named ${PHOTON_MASTER_FLAVOR}"
|
||||
echo " * A disk flavor named ${PHOTON_DISK_FLAVOR}"
|
||||
echo "It will also upload the Kube VMDK"
|
||||
echo ""
|
||||
echo "It creates the tenant with a resource ticket (quota) that may"
|
||||
echo "be inappropriate for your environment. For a production"
|
||||
echo "environment, you should configure these to match your"
|
||||
echo "environment."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Photon Target: ${PHOTON_TARGET}"
|
||||
echo "Photon VMDK: ${PHOTON_VMDK}"
|
||||
}
|
||||
|
||||
function set-target {
|
||||
${PHOTON} target set "${PHOTON_TARGET}" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
function create-tenant {
|
||||
local rc=0
|
||||
local output
|
||||
|
||||
${PHOTON} tenant list | grep -q "\t${PHOTON_TENANT}$" > /dev/null 2>&1 || rc=$?
|
||||
if [[ ${rc} -eq 0 ]]; then
|
||||
echo "Tenant ${PHOTON_TENANT} already made, skipping"
|
||||
else
|
||||
echo "Making tenant ${PHOTON_TENANT}"
|
||||
rc=0
|
||||
output=$(${PHOTON} tenant create "${PHOTON_TENANT}" 2>&1) || {
|
||||
echo "ERROR: Could not create tenant \"${PHOTON_TENANT}\", exiting"
|
||||
echo "Output from tenant creation:"
|
||||
echo "${output}"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
${PHOTON} tenant set "${PHOTON_TENANT}" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
function create-project {
|
||||
local rc=0
|
||||
local output
|
||||
|
||||
${PHOTON} project list | grep -q "\t${PHOTON_PROJECT}\t" > /dev/null 2>&1 || rc=$?
|
||||
if [[ ${rc} -eq 0 ]]; then
|
||||
echo "Project ${PHOTON_PROJECT} already made, skipping"
|
||||
else
|
||||
echo "Making project ${PHOTON_PROJECT}"
|
||||
rc=0
|
||||
output=$(${PHOTON} resource-ticket create --tenant "${PHOTON_TENANT}" --name "${PHOTON_TENANT}-resources" --limits "${SETUP_TICKET_SPEC}" 2>&1) || {
|
||||
echo "ERROR: Could not create resource ticket, exiting"
|
||||
echo "Output from resource ticket creation:"
|
||||
echo "${output}"
|
||||
exit 1
|
||||
}
|
||||
|
||||
rc=0
|
||||
output=$(${PHOTON} project create --tenant "${PHOTON_TENANT}" --resource-ticket "${PHOTON_TENANT}-resources" --name "${PHOTON_PROJECT}" --limits "${SETUP_PROJECT_SPEC}" 2>&1) || {
|
||||
echo "ERROR: Could not create project \"${PHOTON_PROJECT}\", exiting"
|
||||
echo "Output from project creation:"
|
||||
echo "${output}"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
${PHOTON} project set "${PHOTON_PROJECT}"
|
||||
}
|
||||
|
||||
function create-vm-flavor {
|
||||
local flavor_name=${1}
|
||||
local flavor_spec=${2}
|
||||
local rc=0
|
||||
local output
|
||||
|
||||
${PHOTON} flavor list | grep -q "\t${flavor_name}\t" > /dev/null 2>&1 || rc=$?
|
||||
if [[ ${rc} -eq 0 ]]; then
|
||||
check-flavor-ready "${flavor_name}"
|
||||
echo "Flavor ${flavor_name} already made, skipping"
|
||||
else
|
||||
echo "Making VM flavor ${flavor_name}"
|
||||
rc=0
|
||||
output=$(${PHOTON} -n flavor create --name "${flavor_name}" --kind "vm" --cost "${flavor_spec}" 2>&1) || {
|
||||
echo "ERROR: Could not create vm flavor \"${flavor_name}\", exiting"
|
||||
echo "Output from flavor creation:"
|
||||
echo "${output}"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
}
|
||||
|
||||
function create-disk-flavor {
|
||||
local rc=0
|
||||
local output
|
||||
|
||||
${PHOTON} flavor list | grep -q "\t${PHOTON_DISK_FLAVOR}\t" > /dev/null 2>&1 || rc=$?
|
||||
if [[ ${rc} -eq 0 ]]; then
|
||||
check-flavor-ready "${PHOTON_DISK_FLAVOR}"
|
||||
echo "Flavor ${PHOTON_DISK_FLAVOR} already made, skipping"
|
||||
else
|
||||
echo "Making disk flavor ${PHOTON_DISK_FLAVOR}"
|
||||
rc=0
|
||||
output=$(${PHOTON} -n flavor create --name "${PHOTON_DISK_FLAVOR}" --kind "ephemeral-disk" --cost "${SETUP_DISK_FLAVOR_SPEC}" 2>&1) || {
|
||||
echo "ERROR: Could not create disk flavor \"${PHOTON_DISK_FLAVOR}\", exiting"
|
||||
echo "Output from flavor creation:"
|
||||
echo "${output}"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
}
|
||||
|
||||
function check-flavor-ready {
|
||||
local flavor_name=${1}
|
||||
local rc=0
|
||||
|
||||
local flavor_id
|
||||
flavor_id=$(${PHOTON} flavor list | grep "\t${flavor_name}\t" | awk '{print $1}') || {
|
||||
echo "ERROR: Found ${flavor_name} but cannot find it's id"
|
||||
exit 1
|
||||
}
|
||||
|
||||
${PHOTON} flavor show "${flavor_id}" | grep "\tREADY\$" > /dev/null 2>&1 || {
|
||||
echo "ERROR: Flavor \"${flavor_name}\" already exists but is not READY. Please delete or fix it."
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
function create-image {
|
||||
local rc=0
|
||||
local num_images
|
||||
local output
|
||||
|
||||
${PHOTON} image list | grep "\t${PHOTON_IMAGE}\t" | grep -q ERROR > /dev/null 2>&1 || rc=$?
|
||||
if [[ ${rc} -eq 0 ]]; then
|
||||
echo "Warning: You have at least one ${PHOTON_IMAGE} image in the ERROR state. You may want to investigate."
|
||||
echo "Images in the ERROR state will be ignored."
|
||||
fi
|
||||
|
||||
rc=0
|
||||
# We don't use grep -c because it exits non-zero when there are no matches; tell shellcheck
|
||||
# shellcheck disable=SC2126
|
||||
num_images=$(${PHOTON} image list | grep "\t${PHOTON_IMAGE}\t" | grep READY | wc -l)
|
||||
if [[ "${num_images}" -gt 1 ]]; then
|
||||
echo "Warning: You have more than one good ${PHOTON_IMAGE} image. You may want to remove duplicates."
|
||||
fi
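# Illustration of the point above (values are made up; assumes "set -o errexit"
# is enabled earlier in this script): "grep -c" exits non-zero when nothing
# matches, so the command substitution itself would fail, while "grep | wc -l"
# still yields a usable count of 0:
#   num=$(printf 'a\nb\n' | grep -c READY)       # prints 0 but exits 1 -> aborts under errexit
#   num=$(printf 'a\nb\n' | grep READY | wc -l)  # exits 0 and yields 0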
|
||||
|
||||
${PHOTON} image list | grep "\t${PHOTON_IMAGE}\t" | grep -q READY > /dev/null 2>&1 || rc=$?
|
||||
if [[ ${rc} -eq 0 ]]; then
|
||||
echo "Image ${PHOTON_VMDK} already uploaded, skipping"
|
||||
else
|
||||
echo "Uploading image ${PHOTON_VMDK}"
|
||||
rc=0
|
||||
output=$(${PHOTON} image create "${PHOTON_VMDK}" -n "${PHOTON_IMAGE}" -i EAGER 2>&1) || {
|
||||
echo "ERROR: Could not upload image, exiting"
|
||||
echo "Output from image create:"
|
||||
echo "${output}"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
}
|
||||
|
||||
# Turn pipefail off: the pipelines above check for failure explicitly, and an
# expected "no match" from grep must not abort the script.
|
||||
set +o pipefail
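# For illustration: if pipefail were left on, a line such as
#   num_images=$(${PHOTON} image list | grep "\t${PHOTON_IMAGE}\t" | grep READY | wc -l)
# would take grep's non-zero status as the pipeline status and (under errexit)
# abort the script whenever there is no match, even though "no matching image
# yet" is an expected case handled above.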
|
||||
|
||||
parse-cmd-line "$@"
|
||||
main
|
@ -1,4 +0,0 @@
|
||||
The scripts in this directory are not meant to be invoked
|
||||
directly. Instead they are partial scripts that are combined into full
|
||||
scripts by util.sh and are run on the Kubernetes nodes as part of the
|
||||
setup.
|
@ -1,127 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Generate token files (random bearer tokens for the kubelet, kube-proxy, and other service accounts)
|
||||
|
||||
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
|
||||
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
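# The two pipelines above read 128 random bytes, base64-encode them, strip the
# "=", "+" and "/" characters (keeping the tokens CSV- and URL-safe), and the
# trailing "dd bs=32 count=1" truncates the result to its first 32 characters.
# Example output (illustrative only, not a real token):
#   dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null
#   # -> 7fK2pQv9Lm3xRt8sWa1bZc4dYe6gHn0J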
|
||||
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
|
||||
if [[ ! -f "${known_tokens_file}" ]]; then
|
||||
|
||||
mkdir -p /srv/salt-overlay/salt/kube-apiserver
|
||||
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
|
||||
(umask u=rw,go= ;
|
||||
echo "$KUBELET_TOKEN,kubelet,kubelet" > $known_tokens_file;
|
||||
echo "$KUBE_PROXY_TOKEN,kube_proxy,kube_proxy" >> $known_tokens_file)
|
||||
|
||||
mkdir -p /srv/salt-overlay/salt/kubelet
|
||||
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
|
||||
(umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file)
|
||||
kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
|
||||
|
||||
mkdir -p /srv/salt-overlay/salt/kubelet
|
||||
(umask 077;
|
||||
cat > "${kubelet_kubeconfig_file}" << EOF
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
clusters:
|
||||
- cluster:
|
||||
server: https://${KUBE_MASTER_IP}
|
||||
insecure-skip-tls-verify: true
|
||||
name: local
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: kubelet
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
||||
users:
|
||||
- name: kubelet
|
||||
user:
|
||||
token: ${KUBELET_TOKEN}
|
||||
EOF
|
||||
)
|
||||
|
||||
|
||||
mkdir -p /srv/salt-overlay/salt/kube-proxy
|
||||
kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
|
||||
# Make a kubeconfig file with the token.
|
||||
# TODO(etune): put apiserver certs into secret too, and reference from authfile,
|
||||
# so that "Insecure" is not needed.
|
||||
(umask 077;
|
||||
cat > "${kube_proxy_kubeconfig_file}" << EOF
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
clusters:
|
||||
- cluster:
|
||||
insecure-skip-tls-verify: true
|
||||
name: local
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: kube-proxy
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
||||
users:
|
||||
- name: kube-proxy
|
||||
user:
|
||||
token: ${KUBE_PROXY_TOKEN}
|
||||
EOF
|
||||
)
|
||||
|
||||
# Generate tokens for other "service accounts". Append to known_tokens.
|
||||
#
|
||||
# NB: If this list ever changes, this script actually has to
|
||||
# change to detect the existence of this file, kill any deleted
|
||||
# old tokens and add any new tokens (to handle the upgrade case).
|
||||
service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
|
||||
for account in "${service_accounts[@]}"; do
|
||||
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
|
||||
echo "${token},${account},${account}" >> "${known_tokens_file}"
|
||||
done
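# For illustration only (token values are hypothetical), known_tokens.csv now
# holds one CSV line per identity, e.g.:
#   Jq83KfVN2wLpTz7RdX0mYc5AbHsGuE1i,kubelet,kubelet
#   0sPm4TkWq9ZxLn2CvB7yRd1AeF6gHj3U,system:dns,system:dns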
|
||||
fi
|
||||
|
||||
readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"
|
||||
if [[ ! -e "${BASIC_AUTH_FILE}" ]]; then
|
||||
mkdir -p /srv/salt-overlay/salt/kube-apiserver
|
||||
(umask 077;
|
||||
echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}")
|
||||
fi
|
||||
|
||||
|
||||
# Create the overlay files for the salt tree. We create these in a separate
|
||||
# place so that we can blow away the rest of the salt configs on a kube-push and
|
||||
# re-apply these.
|
||||
|
||||
mkdir -p /srv/salt-overlay/pillar
|
||||
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
|
||||
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
|
||||
node_instance_prefix: $NODE_INSTANCE_PREFIX
|
||||
service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
|
||||
enable_cluster_monitoring: "${ENABLE_CLUSTER_MONITORING:-none}"
|
||||
enable_cluster_logging: "${ENABLE_CLUSTER_LOGGING:false}"
|
||||
enable_cluster_ui: "${ENABLE_CLUSTER_UI:true}"
|
||||
enable_node_logging: "${ENABLE_NODE_LOGGING:false}"
|
||||
logging_destination: $LOGGING_DESTINATION
|
||||
elasticsearch_replicas: $ELASTICSEARCH_LOGGING_REPLICAS
|
||||
enable_cluster_dns: "${ENABLE_CLUSTER_DNS:-false}"
|
||||
dns_server: $DNS_SERVER_IP
|
||||
dns_domain: $DNS_DOMAIN
|
||||
e2e_storage_test_environment: "${E2E_STORAGE_TEST_ENVIRONMENT:-false}"
|
||||
cluster_cidr: "$NODE_IP_RANGES"
|
||||
allocate_node_cidrs: "${ALLOCATE_NODE_CIDRS:-true}"
|
||||
admission_control: Initializers,NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota
|
||||
EOF
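# For illustration only (values are hypothetical): after substitution the
# generated /srv/salt-overlay/pillar/cluster-params.sls contains plain YAML
# such as
#   instance_prefix: 'kubernetes'
#   node_instance_prefix: kubernetes-node
#   service_cluster_ip_range: 10.244.240.0/20
#   enable_cluster_dns: "true"
#   dns_server: 10.244.240.240
#   dns_domain: cluster.local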
|
@ -1,22 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Remove kube.vm from /etc/hosts
|
||||
sed -i -e 's/\b\w\+\.vm\b//' /etc/hosts
|
||||
|
||||
# Update hostname in /etc/hosts and /etc/hostname
|
||||
sed -i -e "s/\\bkube\\b/${MY_NAME}/g" /etc/host{s,name}
|
||||
hostname ${MY_NAME}
|
@ -1,26 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script assumes that the environment variable SERVER_BINARY_TAR contains
|
||||
# the release tar to download and unpack. It is meant to be pushed to the
|
||||
# master and run.
|
||||
|
||||
echo "Unpacking Salt tree"
|
||||
rm -rf kubernetes
|
||||
tar xzf "${SALT_TAR}"
|
||||
|
||||
echo "Running release install script"
|
||||
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR}"
|
@ -1,59 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Use other Debian mirror
|
||||
sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list
|
||||
|
||||
# Prepopulate the name of the Master
|
||||
mkdir -p /etc/salt/minion.d
|
||||
echo "master: ${MASTER_NAME}" > /etc/salt/minion.d/master.conf
|
||||
|
||||
cat <<EOF >/etc/salt/minion.d/grains.conf
|
||||
grains:
|
||||
roles:
|
||||
- kubernetes-master
|
||||
cbr-cidr: $MASTER_IP_RANGE
|
||||
cloud: photon-controller
|
||||
master_extra_sans: $MASTER_EXTRA_SANS
|
||||
api_servers: $MASTER_NAME
|
||||
kubelet_kubeconfig: /srv/salt-overlay/salt/kubelet/kubeconfig
|
||||
kube_user: $KUBE_USER
|
||||
EOF
|
||||
|
||||
# Auto accept all keys from minions that try to join
|
||||
mkdir -p /etc/salt/master.d
|
||||
cat <<EOF >/etc/salt/master.d/auto-accept.conf
|
||||
auto_accept: True
|
||||
EOF
|
||||
|
||||
cat <<EOF >/etc/salt/master.d/reactor.conf
|
||||
# React to new minions starting by running highstate on them.
|
||||
reactor:
|
||||
- 'salt/minion/*/start':
|
||||
- /srv/reactor/highstate-new.sls
|
||||
- /srv/reactor/highstate-masters.sls
|
||||
- /srv/reactor/highstate-minions.sls
|
||||
EOF
|
||||
|
||||
# Install Salt
|
||||
#
|
||||
# We specify -X to avoid a race condition that can cause minion failure to
|
||||
# install. See https://github.com/saltstack/salt-bootstrap/issues/270
|
||||
#
|
||||
# -M installs the master
|
||||
set +x
|
||||
curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -X
|
||||
set -x
|
@ -1,51 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Use other Debian mirror
|
||||
sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list
|
||||
|
||||
# Resolve hostname of master
|
||||
if ! grep -q $KUBE_MASTER /etc/hosts; then
|
||||
echo "Adding host entry for $KUBE_MASTER"
|
||||
echo "${KUBE_MASTER_IP} ${KUBE_MASTER}" >> /etc/hosts
|
||||
fi
|
||||
|
||||
# Prepopulate the name of the Master
|
||||
mkdir -p /etc/salt/minion.d
|
||||
echo "master: ${KUBE_MASTER}" > /etc/salt/minion.d/master.conf
|
||||
|
||||
# Turn on debugging for salt-minion
|
||||
# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion
|
||||
|
||||
# Our minions will have a pool role to distinguish them from the master.
|
||||
#
|
||||
# Setting the "minion_ip" here causes the kubelet to use its IP for
|
||||
# identification instead of its hostname.
|
||||
#
|
||||
cat <<EOF >/etc/salt/minion.d/grains.conf
|
||||
grains:
|
||||
hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
|
||||
roles:
|
||||
- kubernetes-pool
|
||||
- kubernetes-pool-photon-controller
|
||||
cloud: photon-controller
|
||||
EOF
|
||||
|
||||
# Install Salt
|
||||
#
|
||||
# We specify -X to avoid a race condition that can cause minion failure to
|
||||
# install. See https://github.com/saltstack/salt-bootstrap/issues/270
|
||||
curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -X
|
@ -1,86 +0,0 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
||||
# TODO(#3579): This is a temporary hack. It gathers up the yaml,
|
||||
# yaml.in, json files in cluster/addons (minus any demos) and overlays
|
||||
# them into kube-addons, where we expect them.
|
||||
# These files are expected in a salt/kube-addons subdirectory.
|
||||
pkg_tar(
|
||||
name = "_salt_kube-addons",
|
||||
package_dir = "salt/kube-addons",
|
||||
strip_prefix = "/cluster/addons",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//cluster/addons",
|
||||
],
|
||||
)
|
||||
|
||||
pkg_tar(
|
||||
name = "salt",
|
||||
files = glob(
|
||||
["**"],
|
||||
exclude = ["BUILD"],
|
||||
),
|
||||
mode = "0644",
|
||||
modes = {
|
||||
"install.sh": "0755",
|
||||
},
|
||||
package_dir = "kubernetes/saltbase",
|
||||
strip_prefix = ".",
|
||||
deps = [
|
||||
":_salt_kube-addons",
|
||||
],
|
||||
)
|
||||
|
||||
# The following are used in the kubernetes salt tarball.
|
||||
pkg_tar(
|
||||
name = "salt-manifests",
|
||||
files = [
|
||||
"salt/kube-proxy/kube-proxy.manifest",
|
||||
"salt/kube-registry-proxy/kube-registry-proxy.yaml",
|
||||
],
|
||||
mode = "0644",
|
||||
)
|
||||
|
||||
pkg_tar(
|
||||
name = "_kube-admission-controls",
|
||||
files = glob(["salt/kube-admission-controls/limit-range/**"]),
|
||||
mode = "0644",
|
||||
# Maintain limit-range/ subdirectory in tarball
|
||||
strip_prefix = "./salt/kube-admission-controls/",
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
pkg_tar(
|
||||
name = "gci-trusty-salt-manifests",
|
||||
files = [
|
||||
"salt/cluster-autoscaler/cluster-autoscaler.manifest",
|
||||
"salt/e2e-image-puller/e2e-image-puller.manifest",
|
||||
"salt/etcd/etcd.manifest",
|
||||
"salt/kube-addons/kube-addon-manager.yaml",
|
||||
"salt/kube-apiserver/abac-authz-policy.jsonl",
|
||||
"salt/kube-apiserver/kube-apiserver.manifest",
|
||||
"salt/kube-controller-manager/kube-controller-manager.manifest",
|
||||
"salt/kube-scheduler/kube-scheduler.manifest",
|
||||
"salt/l7-gcp/glbc.manifest",
|
||||
"salt/rescheduler/rescheduler.manifest",
|
||||
],
|
||||
mode = "0644",
|
||||
deps = [
|
||||
"_kube-admission-controls",
|
||||
],
|
||||
)
|
@ -1,20 +0,0 @@
|
||||
# SaltStack configuration
|
||||
|
||||
This is the root of the SaltStack configuration for Kubernetes. A high
|
||||
level overview for the Kubernetes SaltStack configuration can be found [in the docs tree](https://kubernetes.io/docs/admin/salt/).
|
||||
|
||||
This SaltStack configuration currently applies to default
|
||||
configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and
|
||||
Ubuntu-on-Azure. (That doesn't mean it can't be made to apply to an
|
||||
arbitrary configuration, but those are only the in-tree OS/IaaS
|
||||
combinations supported today.) As you peruse the configuration, these
|
||||
are shorthanded as `gce`, `vagrant`, `aws`, `azure-legacy` in `grains.cloud`;
|
||||
the documentation in this tree uses this same shorthand for convenience.
|
||||
|
||||
See more:
|
||||
* [pillar](pillar/)
|
||||
* [reactor](reactor/)
|
||||
* [salt](salt/)
|
||||
|
||||
|
||||
[]()
|
@ -1,109 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script will set up the salt directory on the target server. It takes one
|
||||
# argument that is a tarball with the pre-compiled kubernetes server binaries.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
SALT_ROOT=$(dirname "${BASH_SOURCE}")
|
||||
readonly SALT_ROOT
|
||||
|
||||
readonly KUBE_DOCKER_WRAPPED_BINARIES=(
|
||||
kube-apiserver
|
||||
kube-controller-manager
|
||||
kube-scheduler
|
||||
kube-proxy
|
||||
)
|
||||
|
||||
readonly SERVER_BIN_TAR=${1-}
|
||||
if [[ -z "$SERVER_BIN_TAR" ]]; then
|
||||
echo "!!! No binaries specified"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create a temp dir for untaring
|
||||
KUBE_TEMP=$(mktemp --tmpdir=/srv -d -t kubernetes.XXXXXX)
|
||||
trap 'rm -rf "${KUBE_TEMP}"' EXIT
|
||||
|
||||
# This file is meant to run on the master. It will install the salt configs
|
||||
# into the appropriate place on the master. We do this by creating a new set of
|
||||
# salt trees and then quickly mv'ing them where the old ones were.
|
||||
|
||||
readonly SALTDIRS=(salt pillar reactor)
|
||||
|
||||
echo "+++ Installing salt files into new trees"
|
||||
rm -rf /srv/salt-new
|
||||
mkdir -p /srv/salt-new
|
||||
|
||||
# This bash voodoo will prepend $SALT_ROOT to the start of each item in the
|
||||
# $SALTDIRS array
|
||||
cp -v -R --preserve=mode "${SALTDIRS[@]/#/${SALT_ROOT}/}" /srv/salt-new
|
||||
|
||||
echo "+++ Installing salt overlay files"
|
||||
for dir in "${SALTDIRS[@]}"; do
|
||||
if [[ -d "/srv/salt-overlay/$dir" ]]; then
|
||||
cp -v -R --preserve=mode "/srv/salt-overlay/$dir" "/srv/salt-new/"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "+++ Install binaries from tar: $1"
|
||||
tar -xz -C "${KUBE_TEMP}" -f "$1"
|
||||
mkdir -p /srv/salt-new/salt/kube-bins
|
||||
mkdir -p /srv/salt-new/salt/kube-docs
|
||||
cp -v "${KUBE_TEMP}/kubernetes/server/bin/"* /srv/salt-new/salt/kube-bins/
|
||||
cp -v "${KUBE_TEMP}/kubernetes/LICENSES" /srv/salt-new/salt/kube-docs/
|
||||
cp -v "${KUBE_TEMP}/kubernetes/kubernetes-src.tar.gz" /srv/salt-new/salt/kube-docs/
|
||||
|
||||
kube_bin_dir="/srv/salt-new/salt/kube-bins";
|
||||
docker_images_sls_file="/srv/salt-new/pillar/docker-images.sls";
|
||||
for docker_file in "${KUBE_DOCKER_WRAPPED_BINARIES[@]}"; do
|
||||
docker_tag=$(cat ${kube_bin_dir}/${docker_file}.docker_tag);
|
||||
if [[ ! -z "${KUBE_IMAGE_TAG:-}" ]]; then
|
||||
docker_tag="${KUBE_IMAGE_TAG}"
|
||||
fi
|
||||
sed -i "s/#${docker_file}_docker_tag_value#/${docker_tag}/" "${docker_images_sls_file}";
|
||||
done
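# For illustration (the tag value is hypothetical): each sed above rewrites the
# matching placeholder in pillar/docker-images.sls, e.g.
#   kube-apiserver_docker_tag: #kube-apiserver_docker_tag_value#
# becomes
#   kube-apiserver_docker_tag: v1.8.7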
|
||||
|
||||
cat <<EOF >>"${docker_images_sls_file}"
|
||||
kube_docker_registry: '$(echo ${KUBE_DOCKER_REGISTRY:-gcr.io/google_containers})'
|
||||
EOF
|
||||
|
||||
# TODO(zmerlynn): Forgive me, this is really gross. But in order to
|
||||
# avoid breaking the non-Salt deployments, which already painfully
|
||||
# have to templatize a couple of the add-ons anyways, manually
|
||||
# templatize the addon registry for regional support. When we get
|
||||
# better templating, we can fix this.
|
||||
readonly kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}"
|
||||
if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then
|
||||
find /srv/salt-new -name \*.yaml -or -name \*.yaml.in | \
|
||||
xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
|
||||
# All the legacy .manifest files with hardcoded gcr.io are JSON.
|
||||
find /srv/salt-new -name \*.manifest -or -name \*.json | \
|
||||
xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@"
|
||||
fi
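# For illustration (registry name and image line are hypothetical): with
# KUBE_ADDON_REGISTRY=eu.gcr.io/google_containers the first sed rewrites
#   image: gcr.io/google_containers/kube-addon-manager:v6.5
# into
#   image: eu.gcr.io/google_containers/kube-addon-manager:v6.5
# and the second sed does the same for the JSON-style image fields in the
# legacy .manifest files.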
|
||||
|
||||
echo "+++ Swapping in new configs"
|
||||
for dir in "${SALTDIRS[@]}"; do
|
||||
if [[ -d "/srv/$dir" ]]; then
|
||||
rm -rf "/srv/$dir"
|
||||
fi
|
||||
mv -v "/srv/salt-new/$dir" "/srv/$dir"
|
||||
done
|
||||
|
||||
rm -rf /srv/salt-new
|
@ -1,22 +0,0 @@
|
||||
The
|
||||
[SaltStack pillar](http://docs.saltstack.com/en/latest/topics/pillar/)
|
||||
data is partially statically derived from the contents of this
|
||||
directory. The bulk of the pillars are hard to perceive from browsing
|
||||
this directory, though, because they are written into
|
||||
[cluster-params.sls](cluster-params.sls) at cluster inception.
|
||||
|
||||
* [cluster-params.sls](cluster-params.sls) is generated entirely at cluster inception. See e.g. [configure-vm.sh](../../gce/configure-vm.sh#L262)
|
||||
* [docker-images.sls](docker-images.sls) stores the Docker tags of the current Docker-wrapped server binaries, twiddled by the Salt install script
|
||||
* [logging.sls](logging.sls) defines the cluster log level
|
||||
* [mine.sls](mine.sls): defines the variables shared across machines in the Salt
|
||||
mine. Its use is largely deprecated, and it is totally
|
||||
unavailable on GCE, which runs standalone.
|
||||
* [privilege.sls](privilege.sls) defines whether privileged containers are allowed.
|
||||
* [top.sls](top.sls) defines which pillars are active across the cluster.
|
||||
|
||||
## Future work
|
||||
|
||||
Document the current pillars across providers
|
||||
|
||||
|
||||
[]()
|
@ -1,4 +0,0 @@
|
||||
# This file is meant to be replaced with cluster specific parameters if necessary.
|
||||
|
||||
# Examples:
|
||||
# node_instance_prefix: <base of regex for -minion_regexp to apiserver>
|
@ -1,5 +0,0 @@
|
||||
# This file is populated when kubernetes is built.
|
||||
kube-apiserver_docker_tag: #kube-apiserver_docker_tag_value#
|
||||
kube-controller-manager_docker_tag: #kube-controller-manager_docker_tag_value#
|
||||
kube-scheduler_docker_tag: #kube-scheduler_docker_tag_value#
|
||||
kube-proxy_docker_tag: #kube-proxy_docker_tag_value#
|
@ -1 +0,0 @@
|
||||
log_level: "--v=2"
|
@ -1,12 +0,0 @@
|
||||
{% if grains.cloud is defined and grains.cloud == 'gce' -%}
|
||||
# On GCE, there is no Salt mine. We run standalone.
|
||||
{% else %}
|
||||
# Allow everyone to see cached values of who sits at what IP
|
||||
{% set networkInterfaceName = "eth0" %}
|
||||
{% if grains.networkInterfaceName is defined %}
|
||||
{% set networkInterfaceName = grains.networkInterfaceName %}
|
||||
{% endif %}
|
||||
mine_functions:
|
||||
network.ip_addrs: [{{networkInterfaceName}}]
|
||||
grains.items: []
|
||||
{% endif -%}
|
@ -1,2 +0,0 @@
|
||||
# If true, allow privileged containers to be created by API
|
||||
allow_privileged: true
|
@ -1,9 +0,0 @@
|
||||
{% if grains['oscodename'] in [ 'vivid', 'wily', 'jessie', 'xenial', 'yakkety' ] %}
|
||||
is_systemd: True
|
||||
systemd_system_path: /lib/systemd/system
|
||||
{% elif grains['os_family'] == 'RedHat' %}
|
||||
is_systemd: True
|
||||
systemd_system_path: /usr/lib/systemd/system
|
||||
{% else %}
|
||||
is_systemd: False
|
||||
{% endif %}
|
@ -1,8 +0,0 @@
|
||||
base:
|
||||
'*':
|
||||
- mine
|
||||
- cluster-params
|
||||
- logging
|
||||
- docker-images
|
||||
- privilege
|
||||
- systemd
|
@ -1,6 +0,0 @@
|
||||
[SaltStack reactor](http://docs.saltstack.com/en/latest/topics/reactor/) files, largely defining reactions to new nodes.
|
||||
|
||||
**Ignored for GCE, which runs standalone on each machine**
|
||||
|
||||
|
||||
[]()
|
@ -1,10 +0,0 @@
|
||||
# This runs highstate on the master node(s).
|
||||
#
|
||||
# Some of the cluster deployment scripts pass the list of minion addresses to
|
||||
# the apiserver as a command line argument. This list needs to be updated if a
|
||||
# new minion is started, so run highstate on the master(s) when this happens.
|
||||
#
|
||||
highstate_master:
|
||||
cmd.state.highstate:
|
||||
- tgt: 'roles:kubernetes-master'
|
||||
- expr_form: grain
|
@ -1,10 +0,0 @@
|
||||
# This runs highstate on the minion nodes.
|
||||
#
|
||||
# Some of the cluster deployment scripts use the list of minions on the minions
|
||||
# themselves. To propagate changes throughout
|
||||
# the pool, run highstate on all minions whenever a single minion starts.
|
||||
#
|
||||
highstate_minions:
|
||||
cmd.state.highstate:
|
||||
- tgt: 'roles:kubernetes-pool'
|
||||
- expr_form: grain
|
@ -1,4 +0,0 @@
|
||||
# This runs highstate only on the NEW node, regardless of type.
|
||||
highstate_new:
|
||||
cmd.state.highstate:
|
||||
- tgt: {{ data['id'] }}
|
@ -1,29 +0,0 @@
|
||||
This directory forms the base of the main SaltStack configuration. The
|
||||
place to start with any SaltStack configuration is
|
||||
[top.sls](top.sls). However, unless you are particularly keen on
|
||||
reading Jinja templates, the following table breaks down what
|
||||
configurations run on what providers. (NB: The [_states](_states/)
|
||||
directory is a special directory included by Salt for `ensure` blocks,
|
||||
and is only used for the [docker](docker/) config.)
|
||||
|
||||
Key: M = Config applies to master, n = config applies to nodes
|
||||
|
||||
Config | GCE | Vagrant | AWS | Azure
|
||||
----------------------------------------------------|-------|---------|-----|------
|
||||
[debian-auto-upgrades](debian-auto-upgrades/) | M n | M n | M n | M n
|
||||
[docker](docker/) | M n | M n | M n | M n
|
||||
[etcd](etcd/) | M | M | M | M
|
||||
[generate-cert](generate-cert/) | M | M | M | M
|
||||
[kube-addons](kube-addons/) | M | M | M | M
|
||||
[kube-apiserver](kube-apiserver/) | M | M | M | M
|
||||
[kube-controller-manager](kube-controller-manager/) | M | M | M | M
|
||||
[kube-proxy](kube-proxy/) | n | n | n | n
|
||||
[kube-scheduler](kube-scheduler/) | M | M | M | M
|
||||
[kubelet](kubelet/) | M n | M n | M n | M n
|
||||
[logrotate](logrotate/) | M n | n | M n | M n
|
||||
[supervisord](supervisor/) | M n | M n | M n | M n
|
||||
[base](base.sls) | M n | M n | M n | M n
|
||||
[kube-client-tools](kube-client-tools.sls) | M | M | M | M
|
||||
|
||||
|
||||
[]()
|
@ -1,60 +0,0 @@
|
||||
pkg-core:
|
||||
pkg.installed:
|
||||
- names:
|
||||
- curl
|
||||
- ebtables
|
||||
{% if grains['os_family'] == 'RedHat' %}
|
||||
- python
|
||||
- git
|
||||
- socat
|
||||
{% else %}
|
||||
- apt-transport-https
|
||||
- python-apt
|
||||
- nfs-common
|
||||
- socat
|
||||
{% endif %}
|
||||
# Ubuntu installs netcat-openbsd by default, but on GCE/Debian netcat-traditional is installed.
|
||||
# They behave slightly differently.
|
||||
# For sanity, we try to make sure we have the same netcat on all OSes (#15166)
|
||||
{% if grains['os'] == 'Ubuntu' %}
|
||||
- netcat-traditional
|
||||
{% endif %}
|
||||
# Make sure git is installed for mounting git volumes
|
||||
{% if grains['os'] == 'Ubuntu' %}
|
||||
- git
|
||||
{% endif %}
|
||||
|
||||
# Fix ARP cache issues on AWS by setting net.ipv4.neigh.default.gc_thresh1=0
|
||||
# See issue #23395
|
||||
{% if grains.get('cloud') == 'aws' %}
|
||||
# Work around Salt #18089: https://github.com/saltstack/salt/issues/18089
|
||||
# (we also have to give it a different id from the same fix elsewhere)
|
||||
99-salt-conf-with-a-different-id:
|
||||
file.touch:
|
||||
- name: /etc/sysctl.d/99-salt.conf
|
||||
|
||||
net.ipv4.neigh.default.gc_thresh1:
|
||||
sysctl.present:
|
||||
- value: 0
|
||||
{% endif %}
|
||||
|
||||
/usr/local/share/doc/kubernetes:
|
||||
file.directory:
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
- makedirs: True
|
||||
|
||||
/usr/local/share/doc/kubernetes/LICENSES:
|
||||
file.managed:
|
||||
- source: salt://kube-docs/LICENSES
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
/usr/local/share/doc/kubernetes/kubernetes-src.tar.gz:
|
||||
file.managed:
|
||||
- source: salt://kube-docs/kubernetes-src.tar.gz
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
@ -1,6 +0,0 @@
|
||||
approvers:
|
||||
- bowei
|
||||
- dnardo
|
||||
reviewers:
|
||||
- bowei
|
||||
- dnardo
|
@ -1,9 +0,0 @@
|
||||
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}
|
||||
|
||||
ip6_tables:
|
||||
kmod.present
|
||||
|
||||
xt_set:
|
||||
kmod.present
|
||||
|
||||
{% endif -%}
|
@ -1,12 +0,0 @@
|
||||
reviewers:
|
||||
- mwielgus
|
||||
- jszczepkowski
|
||||
- MaciekPytel
|
||||
- aleksandra-malinowska
|
||||
- bskiba
|
||||
approvers:
|
||||
- mwielgus
|
||||
- jszczepkowski
|
||||
- MaciekPytel
|
||||
- aleksandra-malinowska
|
||||
- bskiba
|
@ -1,25 +0,0 @@
|
||||
# Copy autoscaler manifest to manifests folder for master.
|
||||
# The ordering of the salt states for the docker service, the kubelet, and the
# master addon below is very important: it avoids a race between Salt
# restarting docker or the kubelet and the kubelet starting the master components.
|
||||
# Please see http://issue.k8s.io/10122#issuecomment-114566063
|
||||
# for detail explanation on this very issue.
|
||||
|
||||
/etc/kubernetes/manifests/cluster-autoscaler.manifest:
|
||||
file.managed:
|
||||
- source: salt://cluster-autoscaler/cluster-autoscaler.manifest
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
- dir_mode: 755
|
||||
- require:
|
||||
- service: docker
|
||||
- service: kubelet
|
||||
|
||||
/var/log/cluster-autoscaler.log:
|
||||
file.managed:
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
@ -1,8 +0,0 @@
|
||||
approvers:
|
||||
- bowei
|
||||
- dnardo
|
||||
- freehan
|
||||
reviewers:
|
||||
- bowei
|
||||
- dnardo
|
||||
- freehan
|
@ -1,41 +0,0 @@
|
||||
/home/kubernetes:
|
||||
file.directory:
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
- makedirs: True
|
||||
|
||||
/etc/cni/net.d:
|
||||
file.directory:
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
- makedirs: True
|
||||
|
||||
# These are all available CNI network plugins.
|
||||
cni-tar:
|
||||
archive:
|
||||
- extracted
|
||||
- user: root
|
||||
- name: /home/kubernetes/bin
|
||||
- makedirs: True
|
||||
- source: https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.6.0.tgz
|
||||
- tar_options: v
|
||||
- source_hash: md5=9534876FAE7DBE813CDAB404DC1F9219
|
||||
- archive_format: tar
|
||||
- if_missing: /home/kubernetes/bin
|
||||
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'vagrant' ] %}
|
||||
# Install local CNI network plugins in a Vagrant environment
|
||||
cmd-local-cni-plugins:
|
||||
cmd.run:
|
||||
- name: |
|
||||
cp -v /vagrant/cluster/network-plugins/cni/bin/* /home/kubernetes/bin/.
|
||||
chmod +x /home/kubernetes/bin/*
|
||||
cmd-local-cni-config:
|
||||
cmd.run:
|
||||
- name: |
|
||||
cp -v /vagrant/cluster/network-plugins/cni/config/* /etc/cni/net.d/.
|
||||
chown root:root /etc/cni/net.d/*
|
||||
chmod 744 /etc/cni/net.d/*
|
||||
{% endif -%}
|