Merge branch 'master' of github.com:kubernetes/kubernetes

Author: Yinan Li
Date: 2017-08-09 08:46:17 -07:00
1175 changed files with 28897 additions and 10175 deletions


@@ -1,45 +1,52 @@
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [v1.7.3](#v173)
- [Downloads for v1.7.3](#downloads-for-v173)
- [v1.6.8](#v168)
- [Downloads for v1.6.8](#downloads-for-v168)
- [Client Binaries](#client-binaries)
- [Server Binaries](#server-binaries)
- [Node Binaries](#node-binaries)
- [Changelog since v1.7.2](#changelog-since-v172)
- [Changelog since v1.6.7](#changelog-since-v167)
- [Other notable changes](#other-notable-changes)
- [v1.7.2](#v172)
- [Downloads for v1.7.2](#downloads-for-v172)
- [v1.7.3](#v173)
- [Downloads for v1.7.3](#downloads-for-v173)
- [Client Binaries](#client-binaries-1)
- [Server Binaries](#server-binaries-1)
- [Node Binaries](#node-binaries-1)
- [Changelog since v1.7.1](#changelog-since-v171)
- [Changelog since v1.7.2](#changelog-since-v172)
- [Other notable changes](#other-notable-changes-1)
- [v1.7.1](#v171)
- [Downloads for v1.7.1](#downloads-for-v171)
- [v1.7.2](#v172)
- [Downloads for v1.7.2](#downloads-for-v172)
- [Client Binaries](#client-binaries-2)
- [Server Binaries](#server-binaries-2)
- [Node Binaries](#node-binaries-2)
- [Changelog since v1.7.0](#changelog-since-v170)
- [Changelog since v1.7.1](#changelog-since-v171)
- [Other notable changes](#other-notable-changes-2)
- [v1.8.0-alpha.2](#v180-alpha2)
- [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2)
- [v1.7.1](#v171)
- [Downloads for v1.7.1](#downloads-for-v171)
- [Client Binaries](#client-binaries-3)
- [Server Binaries](#server-binaries-3)
- [Node Binaries](#node-binaries-3)
- [Changelog since v1.7.0](#changelog-since-v170-1)
- [Action Required](#action-required)
- [Changelog since v1.7.0](#changelog-since-v170)
- [Other notable changes](#other-notable-changes-3)
- [v1.6.7](#v167)
- [Downloads for v1.6.7](#downloads-for-v167)
- [v1.8.0-alpha.2](#v180-alpha2)
- [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2)
- [Client Binaries](#client-binaries-4)
- [Server Binaries](#server-binaries-4)
- [Node Binaries](#node-binaries-4)
- [Changelog since v1.6.6](#changelog-since-v166)
- [Changelog since v1.7.0](#changelog-since-v170-1)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-4)
- [v1.7.0](#v170)
- [Downloads for v1.7.0](#downloads-for-v170)
- [v1.6.7](#v167)
- [Downloads for v1.6.7](#downloads-for-v167)
- [Client Binaries](#client-binaries-5)
- [Server Binaries](#server-binaries-5)
- [Node Binaries](#node-binaries-5)
- [Changelog since v1.6.6](#changelog-since-v166)
- [Other notable changes](#other-notable-changes-5)
- [v1.7.0](#v170)
- [Downloads for v1.7.0](#downloads-for-v170)
- [Client Binaries](#client-binaries-6)
- [Server Binaries](#server-binaries-6)
- [Node Binaries](#node-binaries-6)
- [**Major Themes**](#major-themes)
- [**Action Required Before Upgrading**](#action-required-before-upgrading)
- [Network](#network)
@@ -95,7 +102,7 @@
- [Local Storage](#local-storage)
- [Volume Plugins](#volume-plugins)
- [Metrics](#metrics)
- [**Other notable changes**](#other-notable-changes-5)
- [**Other notable changes**](#other-notable-changes-6)
- [Admission plugin](#admission-plugin)
- [API Machinery](#api-machinery-1)
- [Application autoscaling](#application-autoscaling-1)
@@ -123,127 +130,127 @@
- [Previous Releases Included in v1.7.0](#previous-releases-included-in-v170)
- [v1.7.0-rc.1](#v170-rc1)
- [Downloads for v1.7.0-rc.1](#downloads-for-v170-rc1)
- [Client Binaries](#client-binaries-6)
- [Server Binaries](#server-binaries-6)
- [Node Binaries](#node-binaries-6)
- [Changelog since v1.7.0-beta.2](#changelog-since-v170-beta2)
- [Action Required](#action-required-1)
- [Other notable changes](#other-notable-changes-6)
- [v1.8.0-alpha.1](#v180-alpha1)
- [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1)
- [Client Binaries](#client-binaries-7)
- [Server Binaries](#server-binaries-7)
- [Node Binaries](#node-binaries-7)
- [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4)
- [Action Required](#action-required-2)
- [Changelog since v1.7.0-beta.2](#changelog-since-v170-beta2)
- [Action Required](#action-required-1)
- [Other notable changes](#other-notable-changes-7)
- [v1.6.6](#v166)
- [Downloads for v1.6.6](#downloads-for-v166)
- [v1.8.0-alpha.1](#v180-alpha1)
- [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1)
- [Client Binaries](#client-binaries-8)
- [Server Binaries](#server-binaries-8)
- [Node Binaries](#node-binaries-8)
- [Changelog since v1.6.5](#changelog-since-v165)
- [Action Required](#action-required-3)
- [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4)
- [Action Required](#action-required-2)
- [Other notable changes](#other-notable-changes-8)
- [v1.7.0-beta.2](#v170-beta2)
- [Downloads for v1.7.0-beta.2](#downloads-for-v170-beta2)
- [v1.6.6](#v166)
- [Downloads for v1.6.6](#downloads-for-v166)
- [Client Binaries](#client-binaries-9)
- [Server Binaries](#server-binaries-9)
- [Node Binaries](#node-binaries-9)
- [Changelog since v1.7.0-beta.1](#changelog-since-v170-beta1)
- [Action Required](#action-required-4)
- [Changelog since v1.6.5](#changelog-since-v165)
- [Action Required](#action-required-3)
- [Other notable changes](#other-notable-changes-9)
- [v1.6.5](#v165)
- [Known Issues for v1.6.5](#known-issues-for-v165)
- [Downloads for v1.6.5](#downloads-for-v165)
- [v1.7.0-beta.2](#v170-beta2)
- [Downloads for v1.7.0-beta.2](#downloads-for-v170-beta2)
- [Client Binaries](#client-binaries-10)
- [Server Binaries](#server-binaries-10)
- [Node Binaries](#node-binaries-10)
- [Changelog since v1.6.4](#changelog-since-v164)
- [Changelog since v1.7.0-beta.1](#changelog-since-v170-beta1)
- [Action Required](#action-required-4)
- [Other notable changes](#other-notable-changes-10)
- [v1.7.0-beta.1](#v170-beta1)
- [Downloads for v1.7.0-beta.1](#downloads-for-v170-beta1)
- [v1.6.5](#v165)
- [Known Issues for v1.6.5](#known-issues-for-v165)
- [Downloads for v1.6.5](#downloads-for-v165)
- [Client Binaries](#client-binaries-11)
- [Server Binaries](#server-binaries-11)
- [Node Binaries](#node-binaries-11)
- [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4-1)
- [Action Required](#action-required-5)
- [Changelog since v1.6.4](#changelog-since-v164)
- [Other notable changes](#other-notable-changes-11)
- [v1.6.4](#v164)
- [Known Issues for v1.6.4](#known-issues-for-v164)
- [Downloads for v1.6.4](#downloads-for-v164)
- [v1.7.0-beta.1](#v170-beta1)
- [Downloads for v1.7.0-beta.1](#downloads-for-v170-beta1)
- [Client Binaries](#client-binaries-12)
- [Server Binaries](#server-binaries-12)
- [Node Binaries](#node-binaries-12)
- [Changelog since v1.6.3](#changelog-since-v163)
- [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4-1)
- [Action Required](#action-required-5)
- [Other notable changes](#other-notable-changes-12)
- [v1.7.0-alpha.4](#v170-alpha4)
- [Downloads for v1.7.0-alpha.4](#downloads-for-v170-alpha4)
- [v1.6.4](#v164)
- [Known Issues for v1.6.4](#known-issues-for-v164)
- [Downloads for v1.6.4](#downloads-for-v164)
- [Client Binaries](#client-binaries-13)
- [Server Binaries](#server-binaries-13)
- [Node Binaries](#node-binaries-13)
- [Changelog since v1.7.0-alpha.3](#changelog-since-v170-alpha3)
- [Action Required](#action-required-6)
- [Changelog since v1.6.3](#changelog-since-v163)
- [Other notable changes](#other-notable-changes-13)
- [v1.6.3](#v163)
- [Known Issues for v1.6.3](#known-issues-for-v163)
- [Downloads for v1.6.3](#downloads-for-v163)
- [v1.7.0-alpha.4](#v170-alpha4)
- [Downloads for v1.7.0-alpha.4](#downloads-for-v170-alpha4)
- [Client Binaries](#client-binaries-14)
- [Server Binaries](#server-binaries-14)
- [Node Binaries](#node-binaries-14)
- [Changelog since v1.6.2](#changelog-since-v162)
- [Changelog since v1.7.0-alpha.3](#changelog-since-v170-alpha3)
- [Action Required](#action-required-6)
- [Other notable changes](#other-notable-changes-14)
- [v1.7.0-alpha.3](#v170-alpha3)
- [Downloads for v1.7.0-alpha.3](#downloads-for-v170-alpha3)
- [v1.6.3](#v163)
- [Known Issues for v1.6.3](#known-issues-for-v163)
- [Downloads for v1.6.3](#downloads-for-v163)
- [Client Binaries](#client-binaries-15)
- [Server Binaries](#server-binaries-15)
- [Node Binaries](#node-binaries-15)
- [Changelog since v1.7.0-alpha.2](#changelog-since-v170-alpha2)
- [Action Required](#action-required-7)
- [Changelog since v1.6.2](#changelog-since-v162)
- [Other notable changes](#other-notable-changes-15)
- [v1.5.7](#v157)
- [Downloads for v1.5.7](#downloads-for-v157)
- [v1.7.0-alpha.3](#v170-alpha3)
- [Downloads for v1.7.0-alpha.3](#downloads-for-v170-alpha3)
- [Client Binaries](#client-binaries-16)
- [Server Binaries](#server-binaries-16)
- [Node Binaries](#node-binaries-16)
- [Changelog since v1.5.6](#changelog-since-v156)
- [Changelog since v1.7.0-alpha.2](#changelog-since-v170-alpha2)
- [Action Required](#action-required-7)
- [Other notable changes](#other-notable-changes-16)
- [v1.4.12](#v1412)
- [Downloads for v1.4.12](#downloads-for-v1412)
- [v1.5.7](#v157)
- [Downloads for v1.5.7](#downloads-for-v157)
- [Client Binaries](#client-binaries-17)
- [Server Binaries](#server-binaries-17)
- [Node Binaries](#node-binaries-17)
- [Changelog since v1.4.9](#changelog-since-v149)
- [Changelog since v1.5.6](#changelog-since-v156)
- [Other notable changes](#other-notable-changes-17)
- [v1.7.0-alpha.2](#v170-alpha2)
- [Downloads for v1.7.0-alpha.2](#downloads-for-v170-alpha2)
- [v1.4.12](#v1412)
- [Downloads for v1.4.12](#downloads-for-v1412)
- [Client Binaries](#client-binaries-18)
- [Server Binaries](#server-binaries-18)
- [Changelog since v1.7.0-alpha.1](#changelog-since-v170-alpha1)
- [Action Required](#action-required-8)
- [Node Binaries](#node-binaries-18)
- [Changelog since v1.4.9](#changelog-since-v149)
- [Other notable changes](#other-notable-changes-18)
- [v1.6.2](#v162)
- [Downloads for v1.6.2](#downloads-for-v162)
- [v1.7.0-alpha.2](#v170-alpha2)
- [Downloads for v1.7.0-alpha.2](#downloads-for-v170-alpha2)
- [Client Binaries](#client-binaries-19)
- [Server Binaries](#server-binaries-19)
- [Changelog since v1.6.1](#changelog-since-v161)
- [Changelog since v1.7.0-alpha.1](#changelog-since-v170-alpha1)
- [Action Required](#action-required-8)
- [Other notable changes](#other-notable-changes-19)
- [v1.7.0-alpha.1](#v170-alpha1)
- [Downloads for v1.7.0-alpha.1](#downloads-for-v170-alpha1)
- [v1.6.2](#v162)
- [Downloads for v1.6.2](#downloads-for-v162)
- [Client Binaries](#client-binaries-20)
- [Server Binaries](#server-binaries-20)
- [Changelog since v1.6.0](#changelog-since-v160)
- [Changelog since v1.6.1](#changelog-since-v161)
- [Other notable changes](#other-notable-changes-20)
- [v1.6.1](#v161)
- [Downloads for v1.6.1](#downloads-for-v161)
- [v1.7.0-alpha.1](#v170-alpha1)
- [Downloads for v1.7.0-alpha.1](#downloads-for-v170-alpha1)
- [Client Binaries](#client-binaries-21)
- [Server Binaries](#server-binaries-21)
- [Changelog since v1.6.0](#changelog-since-v160-1)
- [Changelog since v1.6.0](#changelog-since-v160)
- [Other notable changes](#other-notable-changes-21)
- [v1.6.0](#v160)
- [Downloads for v1.6.0](#downloads-for-v160)
- [v1.6.1](#v161)
- [Downloads for v1.6.1](#downloads-for-v161)
- [Client Binaries](#client-binaries-22)
- [Server Binaries](#server-binaries-22)
- [Changelog since v1.6.0](#changelog-since-v160-1)
- [Other notable changes](#other-notable-changes-22)
- [v1.6.0](#v160)
- [Downloads for v1.6.0](#downloads-for-v160)
- [Client Binaries](#client-binaries-23)
- [Server Binaries](#server-binaries-23)
- [WARNING: etcd backup strongly recommended](#warning:-etcd-backup-strongly-recommended)
- [Major updates and release themes](#major-updates-and-release-themes)
- [Action Required](#action-required-9)
@@ -310,7 +317,7 @@
- [vSphere](#vsphere)
- [Federation](#federation-2)
- [kubefed](#kubefed-1)
- [Other Notable Changes](#other-notable-changes-22)
- [Other Notable Changes](#other-notable-changes-23)
- [Garbage Collector](#garbage-collector)
- [kubeadm](#kubeadm-4)
- [kubectl](#kubectl-1)
@@ -320,7 +327,7 @@
- [Updates to apply](#updates-to-apply)
- [Updates to edit](#updates-to-edit)
- [Bug fixes](#bug-fixes)
- [Other Notable Changes](#other-notable-changes-23)
- [Other Notable Changes](#other-notable-changes-24)
- [Node Components](#node-components-4)
- [Bug fixes](#bug-fixes-1)
- [kube-controller-manager](#kube-controller-manager)
@@ -333,7 +340,7 @@
- [Photon](#photon)
- [rbd](#rbd)
- [vSphere](#vsphere-1)
- [Other Notable Changes](#other-notable-changes-24)
- [Other Notable Changes](#other-notable-changes-25)
- [Changes to Cluster Provisioning Scripts](#changes-to-cluster-provisioning-scripts)
- [AWS](#aws-1)
- [Juju](#juju)
@@ -341,7 +348,7 @@
- [GCE](#gce-1)
- [OpenStack](#openstack)
- [Container Images](#container-images)
- [Other Notable Changes](#other-notable-changes-25)
- [Other Notable Changes](#other-notable-changes-26)
- [Changes to Addons](#changes-to-addons)
- [Dashboard](#dashboard)
- [DNS](#dns)
@@ -357,108 +364,108 @@
- [Previous Releases Included in v1.6.0](#previous-releases-included-in-v160)
- [v1.5.6](#v156)
- [Downloads for v1.5.6](#downloads-for-v156)
- [Client Binaries](#client-binaries-23)
- [Server Binaries](#server-binaries-23)
- [Changelog since v1.5.5](#changelog-since-v155)
- [Other notable changes](#other-notable-changes-26)
- [v1.6.0-rc.1](#v160-rc1)
- [Downloads for v1.6.0-rc.1](#downloads-for-v160-rc1)
- [Client Binaries](#client-binaries-24)
- [Server Binaries](#server-binaries-24)
- [Changelog since v1.6.0-beta.4](#changelog-since-v160-beta4)
- [Changelog since v1.5.5](#changelog-since-v155)
- [Other notable changes](#other-notable-changes-27)
- [v1.5.5](#v155)
- [Downloads for v1.5.5](#downloads-for-v155)
- [v1.6.0-rc.1](#v160-rc1)
- [Downloads for v1.6.0-rc.1](#downloads-for-v160-rc1)
- [Client Binaries](#client-binaries-25)
- [Server Binaries](#server-binaries-25)
- [Changelog since v1.6.0-beta.4](#changelog-since-v160-beta4)
- [Other notable changes](#other-notable-changes-28)
- [v1.5.5](#v155)
- [Downloads for v1.5.5](#downloads-for-v155)
- [Client Binaries](#client-binaries-26)
- [Server Binaries](#server-binaries-26)
- [Changelog since v1.5.4](#changelog-since-v154)
- [v1.6.0-beta.4](#v160-beta4)
- [Downloads for v1.6.0-beta.4](#downloads-for-v160-beta4)
- [Client Binaries](#client-binaries-26)
- [Server Binaries](#server-binaries-26)
- [Changelog since v1.6.0-beta.3](#changelog-since-v160-beta3)
- [Other notable changes](#other-notable-changes-28)
- [v1.6.0-beta.3](#v160-beta3)
- [Downloads for v1.6.0-beta.3](#downloads-for-v160-beta3)
- [Client Binaries](#client-binaries-27)
- [Server Binaries](#server-binaries-27)
- [Changelog since v1.6.0-beta.2](#changelog-since-v160-beta2)
- [Changelog since v1.6.0-beta.3](#changelog-since-v160-beta3)
- [Other notable changes](#other-notable-changes-29)
- [v1.6.0-beta.2](#v160-beta2)
- [Downloads for v1.6.0-beta.2](#downloads-for-v160-beta2)
- [v1.6.0-beta.3](#v160-beta3)
- [Downloads for v1.6.0-beta.3](#downloads-for-v160-beta3)
- [Client Binaries](#client-binaries-28)
- [Server Binaries](#server-binaries-28)
- [Changelog since v1.6.0-beta.1](#changelog-since-v160-beta1)
- [Action Required](#action-required-10)
- [Changelog since v1.6.0-beta.2](#changelog-since-v160-beta2)
- [Other notable changes](#other-notable-changes-30)
- [v1.5.4](#v154)
- [Downloads for v1.5.4](#downloads-for-v154)
- [v1.6.0-beta.2](#v160-beta2)
- [Downloads for v1.6.0-beta.2](#downloads-for-v160-beta2)
- [Client Binaries](#client-binaries-29)
- [Server Binaries](#server-binaries-29)
- [Changelog since v1.5.3](#changelog-since-v153)
- [Changelog since v1.6.0-beta.1](#changelog-since-v160-beta1)
- [Action Required](#action-required-10)
- [Other notable changes](#other-notable-changes-31)
- [v1.6.0-beta.1](#v160-beta1)
- [Downloads for v1.6.0-beta.1](#downloads-for-v160-beta1)
- [v1.5.4](#v154)
- [Downloads for v1.5.4](#downloads-for-v154)
- [Client Binaries](#client-binaries-30)
- [Server Binaries](#server-binaries-30)
- [Changelog since v1.6.0-alpha.3](#changelog-since-v160-alpha3)
- [Action Required](#action-required-11)
- [Changelog since v1.5.3](#changelog-since-v153)
- [Other notable changes](#other-notable-changes-32)
- [v1.6.0-alpha.3](#v160-alpha3)
- [Downloads for v1.6.0-alpha.3](#downloads-for-v160-alpha3)
- [v1.6.0-beta.1](#v160-beta1)
- [Downloads for v1.6.0-beta.1](#downloads-for-v160-beta1)
- [Client Binaries](#client-binaries-31)
- [Server Binaries](#server-binaries-31)
- [Changelog since v1.6.0-alpha.2](#changelog-since-v160-alpha2)
- [Changelog since v1.6.0-alpha.3](#changelog-since-v160-alpha3)
- [Action Required](#action-required-11)
- [Other notable changes](#other-notable-changes-33)
- [v1.4.9](#v149)
- [Downloads for v1.4.9](#downloads-for-v149)
- [v1.6.0-alpha.3](#v160-alpha3)
- [Downloads for v1.6.0-alpha.3](#downloads-for-v160-alpha3)
- [Client Binaries](#client-binaries-32)
- [Server Binaries](#server-binaries-32)
- [Changelog since v1.4.8](#changelog-since-v148)
- [Changelog since v1.6.0-alpha.2](#changelog-since-v160-alpha2)
- [Other notable changes](#other-notable-changes-34)
- [v1.5.3](#v153)
- [Downloads for v1.5.3](#downloads-for-v153)
- [v1.4.9](#v149)
- [Downloads for v1.4.9](#downloads-for-v149)
- [Client Binaries](#client-binaries-33)
- [Server Binaries](#server-binaries-33)
- [Node Binaries](#node-binaries-18)
- [Changelog since v1.5.2](#changelog-since-v152)
- [Changelog since v1.4.8](#changelog-since-v148)
- [Other notable changes](#other-notable-changes-35)
- [v1.6.0-alpha.2](#v160-alpha2)
- [Downloads for v1.6.0-alpha.2](#downloads-for-v160-alpha2)
- [v1.5.3](#v153)
- [Downloads for v1.5.3](#downloads-for-v153)
- [Client Binaries](#client-binaries-34)
- [Server Binaries](#server-binaries-34)
- [Changelog since v1.6.0-alpha.1](#changelog-since-v160-alpha1)
- [Node Binaries](#node-binaries-19)
- [Changelog since v1.5.2](#changelog-since-v152)
- [Other notable changes](#other-notable-changes-36)
- [v1.6.0-alpha.1](#v160-alpha1)
- [Downloads for v1.6.0-alpha.1](#downloads-for-v160-alpha1)
- [v1.6.0-alpha.2](#v160-alpha2)
- [Downloads for v1.6.0-alpha.2](#downloads-for-v160-alpha2)
- [Client Binaries](#client-binaries-35)
- [Server Binaries](#server-binaries-35)
- [Changelog since v1.5.0](#changelog-since-v150)
- [Action Required](#action-required-12)
- [Changelog since v1.6.0-alpha.1](#changelog-since-v160-alpha1)
- [Other notable changes](#other-notable-changes-37)
- [v1.5.2](#v152)
- [Downloads for v1.5.2](#downloads-for-v152)
- [v1.6.0-alpha.1](#v160-alpha1)
- [Downloads for v1.6.0-alpha.1](#downloads-for-v160-alpha1)
- [Client Binaries](#client-binaries-36)
- [Server Binaries](#server-binaries-36)
- [Changelog since v1.5.1](#changelog-since-v151)
- [Changelog since v1.5.0](#changelog-since-v150)
- [Action Required](#action-required-12)
- [Other notable changes](#other-notable-changes-38)
- [v1.4.8](#v148)
- [Downloads for v1.4.8](#downloads-for-v148)
- [v1.5.2](#v152)
- [Downloads for v1.5.2](#downloads-for-v152)
- [Client Binaries](#client-binaries-37)
- [Server Binaries](#server-binaries-37)
- [Changelog since v1.4.7](#changelog-since-v147)
- [Changelog since v1.5.1](#changelog-since-v151)
- [Other notable changes](#other-notable-changes-39)
- [v1.5.1](#v151)
- [Downloads for v1.5.1](#downloads-for-v151)
- [v1.4.8](#v148)
- [Downloads for v1.4.8](#downloads-for-v148)
- [Client Binaries](#client-binaries-38)
- [Server Binaries](#server-binaries-38)
- [Changelog since v1.5.0](#changelog-since-v150-1)
- [Changelog since v1.4.7](#changelog-since-v147)
- [Other notable changes](#other-notable-changes-40)
- [v1.5.1](#v151)
- [Downloads for v1.5.1](#downloads-for-v151)
- [Client Binaries](#client-binaries-39)
- [Server Binaries](#server-binaries-39)
- [Changelog since v1.5.0](#changelog-since-v150-1)
- [Other notable changes](#other-notable-changes-41)
- [Known Issues for v1.5.1](#known-issues-for-v151)
- [v1.5.0](#v150)
- [Downloads for v1.5.0](#downloads-for-v150)
- [Client Binaries](#client-binaries-39)
- [Server Binaries](#server-binaries-39)
- [Client Binaries](#client-binaries-40)
- [Server Binaries](#server-binaries-40)
- [Major Themes](#major-themes-1)
- [Features](#features)
- [Known Issues](#known-issues-1)
@@ -467,103 +474,103 @@
- [Action Required Before Upgrading](#action-required-before-upgrading-1)
- [External Dependency Version Information](#external-dependency-version-information-2)
- [Changelog since v1.5.0-beta.3](#changelog-since-v150-beta3)
- [Other notable changes](#other-notable-changes-41)
- [Other notable changes](#other-notable-changes-42)
- [Previous Releases Included in v1.5.0](#previous-releases-included-in-v150)
- [v1.4.7](#v147)
- [Downloads for v1.4.7](#downloads-for-v147)
- [Client Binaries](#client-binaries-40)
- [Server Binaries](#server-binaries-40)
- [Changelog since v1.4.6](#changelog-since-v146)
- [Other notable changes](#other-notable-changes-42)
- [v1.5.0-beta.3](#v150-beta3)
- [Downloads for v1.5.0-beta.3](#downloads-for-v150-beta3)
- [Client Binaries](#client-binaries-41)
- [Server Binaries](#server-binaries-41)
- [Changelog since v1.5.0-beta.2](#changelog-since-v150-beta2)
- [Changelog since v1.4.6](#changelog-since-v146)
- [Other notable changes](#other-notable-changes-43)
- [v1.5.0-beta.2](#v150-beta2)
- [Downloads for v1.5.0-beta.2](#downloads-for-v150-beta2)
- [v1.5.0-beta.3](#v150-beta3)
- [Downloads for v1.5.0-beta.3](#downloads-for-v150-beta3)
- [Client Binaries](#client-binaries-42)
- [Server Binaries](#server-binaries-42)
- [Changelog since v1.5.0-beta.1](#changelog-since-v150-beta1)
- [Changelog since v1.5.0-beta.2](#changelog-since-v150-beta2)
- [Other notable changes](#other-notable-changes-44)
- [v1.5.0-beta.1](#v150-beta1)
- [Downloads for v1.5.0-beta.1](#downloads-for-v150-beta1)
- [v1.5.0-beta.2](#v150-beta2)
- [Downloads for v1.5.0-beta.2](#downloads-for-v150-beta2)
- [Client Binaries](#client-binaries-43)
- [Server Binaries](#server-binaries-43)
- [Changelog since v1.5.0-alpha.2](#changelog-since-v150-alpha2)
- [Action Required](#action-required-13)
- [Changelog since v1.5.0-beta.1](#changelog-since-v150-beta1)
- [Other notable changes](#other-notable-changes-45)
- [v1.4.6](#v146)
- [Downloads for v1.4.6](#downloads-for-v146)
- [v1.5.0-beta.1](#v150-beta1)
- [Downloads for v1.5.0-beta.1](#downloads-for-v150-beta1)
- [Client Binaries](#client-binaries-44)
- [Server Binaries](#server-binaries-44)
- [Changelog since v1.4.5](#changelog-since-v145)
- [Changelog since v1.5.0-alpha.2](#changelog-since-v150-alpha2)
- [Action Required](#action-required-13)
- [Other notable changes](#other-notable-changes-46)
- [v1.3.10](#v1310)
- [Downloads for v1.3.10](#downloads-for-v1310)
- [v1.4.6](#v146)
- [Downloads for v1.4.6](#downloads-for-v146)
- [Client Binaries](#client-binaries-45)
- [Server Binaries](#server-binaries-45)
- [Changelog since v1.3.9](#changelog-since-v139)
- [Changelog since v1.4.5](#changelog-since-v145)
- [Other notable changes](#other-notable-changes-47)
- [v1.4.5](#v145)
- [Downloads for v1.4.5](#downloads-for-v145)
- [v1.3.10](#v1310)
- [Downloads for v1.3.10](#downloads-for-v1310)
- [Client Binaries](#client-binaries-46)
- [Server Binaries](#server-binaries-46)
- [Changelog since v1.4.4](#changelog-since-v144)
- [Changelog since v1.3.9](#changelog-since-v139)
- [Other notable changes](#other-notable-changes-48)
- [v1.5.0-alpha.2](#v150-alpha2)
- [Downloads for v1.5.0-alpha.2](#downloads-for-v150-alpha2)
- [v1.4.5](#v145)
- [Downloads for v1.4.5](#downloads-for-v145)
- [Client Binaries](#client-binaries-47)
- [Server Binaries](#server-binaries-47)
- [Changelog since v1.5.0-alpha.1](#changelog-since-v150-alpha1)
- [Action Required](#action-required-14)
- [Changelog since v1.4.4](#changelog-since-v144)
- [Other notable changes](#other-notable-changes-49)
- [v1.2.7](#v127)
- [Downloads for v1.2.7](#downloads-for-v127)
- [v1.5.0-alpha.2](#v150-alpha2)
- [Downloads for v1.5.0-alpha.2](#downloads-for-v150-alpha2)
- [Client Binaries](#client-binaries-48)
- [Server Binaries](#server-binaries-48)
- [Changelog since v1.2.6](#changelog-since-v126)
- [Changelog since v1.5.0-alpha.1](#changelog-since-v150-alpha1)
- [Action Required](#action-required-14)
- [Other notable changes](#other-notable-changes-50)
- [v1.4.4](#v144)
- [Downloads for v1.4.4](#downloads-for-v144)
- [v1.2.7](#v127)
- [Downloads for v1.2.7](#downloads-for-v127)
- [Client Binaries](#client-binaries-49)
- [Server Binaries](#server-binaries-49)
- [Changelog since v1.4.3](#changelog-since-v143)
- [Changelog since v1.2.6](#changelog-since-v126)
- [Other notable changes](#other-notable-changes-51)
- [v1.4.4](#v144)
- [Downloads for v1.4.4](#downloads-for-v144)
- [Client Binaries](#client-binaries-50)
- [Server Binaries](#server-binaries-50)
- [Changelog since v1.4.3](#changelog-since-v143)
- [Other notable changes](#other-notable-changes-52)
- [v1.3.9](#v139)
- [Downloads](#downloads)
- [Changelog since v1.3.8](#changelog-since-v138)
- [Other notable changes](#other-notable-changes-52)
- [Other notable changes](#other-notable-changes-53)
- [v1.4.3](#v143)
- [Downloads](#downloads-1)
- [Changelog since v1.4.2-beta.1](#changelog-since-v142-beta1)
- [Other notable changes](#other-notable-changes-53)
- [Other notable changes](#other-notable-changes-54)
- [v1.4.2](#v142)
- [Downloads](#downloads-2)
- [Changelog since v1.4.2-beta.1](#changelog-since-v142-beta1-1)
- [Other notable changes](#other-notable-changes-54)
- [Other notable changes](#other-notable-changes-55)
- [v1.5.0-alpha.1](#v150-alpha1)
- [Downloads](#downloads-3)
- [Changelog since v1.4.0-alpha.3](#changelog-since-v140-alpha3)
- [Experimental Features](#experimental-features)
- [Action Required](#action-required-15)
- [Other notable changes](#other-notable-changes-55)
- [Other notable changes](#other-notable-changes-56)
- [v1.4.2-beta.1](#v142-beta1)
- [Downloads](#downloads-4)
- [Changelog since v1.4.1](#changelog-since-v141)
- [Other notable changes](#other-notable-changes-56)
- [Other notable changes](#other-notable-changes-57)
- [v1.4.1](#v141)
- [Downloads](#downloads-5)
- [Changelog since v1.4.1-beta.2](#changelog-since-v141-beta2)
- [v1.4.1-beta.2](#v141-beta2)
- [Downloads](#downloads-6)
- [Changelog since v1.4.0](#changelog-since-v140)
- [Other notable changes](#other-notable-changes-57)
- [Other notable changes](#other-notable-changes-58)
- [v1.3.8](#v138)
- [Downloads](#downloads-7)
- [Changelog since v1.3.7](#changelog-since-v137)
- [Other notable changes](#other-notable-changes-58)
- [Other notable changes](#other-notable-changes-59)
- [v1.4.0](#v140)
- [Downloads](#downloads-8)
- [Major Themes](#major-themes-2)
@@ -583,26 +590,26 @@
- [v1.4.0-beta.10](#v140-beta10)
- [Downloads](#downloads-10)
- [Changelog since v1.4.0-beta.8](#changelog-since-v140-beta8)
- [Other notable changes](#other-notable-changes-59)
- [Other notable changes](#other-notable-changes-60)
- [v1.4.0-beta.8](#v140-beta8)
- [Downloads](#downloads-11)
- [Changelog since v1.4.0-beta.7](#changelog-since-v140-beta7)
- [v1.4.0-beta.7](#v140-beta7)
- [Downloads](#downloads-12)
- [Changelog since v1.4.0-beta.6](#changelog-since-v140-beta6)
- [Other notable changes](#other-notable-changes-60)
- [Other notable changes](#other-notable-changes-61)
- [v1.4.0-beta.6](#v140-beta6)
- [Downloads](#downloads-13)
- [Changelog since v1.4.0-beta.5](#changelog-since-v140-beta5)
- [Other notable changes](#other-notable-changes-61)
- [Other notable changes](#other-notable-changes-62)
- [v1.4.0-beta.5](#v140-beta5)
- [Downloads](#downloads-14)
- [Changelog since v1.4.0-beta.3](#changelog-since-v140-beta3)
- [Other notable changes](#other-notable-changes-62)
- [Other notable changes](#other-notable-changes-63)
- [v1.3.7](#v137)
- [Downloads](#downloads-15)
- [Changelog since v1.3.6](#changelog-since-v136)
- [Other notable changes](#other-notable-changes-63)
- [Other notable changes](#other-notable-changes-64)
- [v1.4.0-beta.3](#v140-beta3)
- [Downloads](#downloads-16)
- [Changelog since v1.4.0-beta.2](#changelog-since-v140-beta2)
@@ -613,57 +620,57 @@
- [v1.4.0-beta.2](#v140-beta2)
- [Downloads](#downloads-17)
- [Changelog since v1.4.0-beta.1](#changelog-since-v140-beta1)
- [Other notable changes](#other-notable-changes-64)
- [Other notable changes](#other-notable-changes-65)
- [v1.4.0-beta.1](#v140-beta1)
- [Downloads](#downloads-18)
- [Changelog since v1.4.0-alpha.3](#changelog-since-v140-alpha3-1)
- [Action Required](#action-required-16)
- [Other notable changes](#other-notable-changes-65)
- [Other notable changes](#other-notable-changes-66)
- [v1.3.6](#v136)
- [Downloads](#downloads-19)
- [Changelog since v1.3.5](#changelog-since-v135)
- [Other notable changes](#other-notable-changes-66)
- [Other notable changes](#other-notable-changes-67)
- [v1.4.0-alpha.3](#v140-alpha3)
- [Downloads](#downloads-20)
- [Changelog since v1.4.0-alpha.2](#changelog-since-v140-alpha2)
- [Action Required](#action-required-17)
- [Other notable changes](#other-notable-changes-67)
- [Other notable changes](#other-notable-changes-68)
- [v1.3.5](#v135)
- [Downloads](#downloads-21)
- [Changelog since v1.3.4](#changelog-since-v134)
- [Other notable changes](#other-notable-changes-68)
- [Other notable changes](#other-notable-changes-69)
- [v1.3.4](#v134)
- [Downloads](#downloads-22)
- [Changelog since v1.3.3](#changelog-since-v133)
- [Other notable changes](#other-notable-changes-69)
- [Other notable changes](#other-notable-changes-70)
- [v1.4.0-alpha.2](#v140-alpha2)
- [Downloads](#downloads-23)
- [Changelog since v1.4.0-alpha.1](#changelog-since-v140-alpha1)
- [Action Required](#action-required-18)
- [Other notable changes](#other-notable-changes-70)
- [Other notable changes](#other-notable-changes-71)
- [v1.3.3](#v133)
- [Downloads](#downloads-24)
- [Changelog since v1.3.2](#changelog-since-v132)
- [Other notable changes](#other-notable-changes-71)
- [Other notable changes](#other-notable-changes-72)
- [Known Issues](#known-issues-3)
- [v1.3.2](#v132)
- [Downloads](#downloads-25)
- [Changelog since v1.3.1](#changelog-since-v131)
- [Other notable changes](#other-notable-changes-72)
- [Other notable changes](#other-notable-changes-73)
- [v1.3.1](#v131)
- [Downloads](#downloads-26)
- [Changelog since v1.3.0](#changelog-since-v130)
- [Other notable changes](#other-notable-changes-73)
- [Other notable changes](#other-notable-changes-74)
- [v1.2.6](#v126)
- [Downloads](#downloads-27)
- [Changelog since v1.2.5](#changelog-since-v125)
- [Other notable changes](#other-notable-changes-74)
- [Other notable changes](#other-notable-changes-75)
- [v1.4.0-alpha.1](#v140-alpha1)
- [Downloads](#downloads-28)
- [Changelog since v1.3.0](#changelog-since-v130-1)
- [Experimental Features](#experimental-features-1)
- [Action Required](#action-required-19)
- [Other notable changes](#other-notable-changes-75)
- [Other notable changes](#other-notable-changes-76)
- [v1.3.0](#v130)
- [Downloads](#downloads-29)
- [Highlights](#highlights)
@@ -679,62 +686,62 @@
- [Downloads](#downloads-30)
- [Changelog since v1.3.0-beta.2](#changelog-since-v130-beta2)
- [Action Required](#action-required-20)
- [Other notable changes](#other-notable-changes-76)
- [Other notable changes](#other-notable-changes-77)
- [v1.2.5](#v125)
- [Downloads](#downloads-31)
- [Changes since v1.2.4](#changes-since-v124)
- [Other notable changes](#other-notable-changes-77)
- [Other notable changes](#other-notable-changes-78)
- [v1.3.0-beta.2](#v130-beta2)
- [Downloads](#downloads-32)
- [Changes since v1.3.0-beta.1](#changes-since-v130-beta1)
- [Experimental Features](#experimental-features-2)
- [Other notable changes](#other-notable-changes-78)
- [Other notable changes](#other-notable-changes-79)
- [v1.3.0-beta.1](#v130-beta1)
- [Downloads](#downloads-33)
- [Changes since v1.3.0-alpha.5](#changes-since-v130-alpha5)
- [Action Required](#action-required-21)
- [Other notable changes](#other-notable-changes-79)
- [Other notable changes](#other-notable-changes-80)
- [v1.3.0-alpha.5](#v130-alpha5)
- [Downloads](#downloads-34)
- [Changes since v1.3.0-alpha.4](#changes-since-v130-alpha4)
- [Action Required](#action-required-22)
- [Other notable changes](#other-notable-changes-80)
- [Other notable changes](#other-notable-changes-81)
- [v1.3.0-alpha.4](#v130-alpha4)
- [Downloads](#downloads-35)
- [Changes since v1.3.0-alpha.3](#changes-since-v130-alpha3)
- [Action Required](#action-required-23)
- [Other notable changes](#other-notable-changes-81)
- [Other notable changes](#other-notable-changes-82)
- [v1.2.4](#v124)
- [Downloads](#downloads-36)
- [Changes since v1.2.3](#changes-since-v123)
- [Other notable changes](#other-notable-changes-82)
- [Other notable changes](#other-notable-changes-83)
- [v1.3.0-alpha.3](#v130-alpha3)
- [Downloads](#downloads-37)
- [Changes since v1.3.0-alpha.2](#changes-since-v130-alpha2)
- [Action Required](#action-required-24)
- [Other notable changes](#other-notable-changes-83)
- [Other notable changes](#other-notable-changes-84)
- [v1.2.3](#v123)
- [Downloads](#downloads-38)
- [Changes since v1.2.2](#changes-since-v122)
- [Action Required](#action-required-25)
- [Other notable changes](#other-notable-changes-84)
- [Other notable changes](#other-notable-changes-85)
- [v1.3.0-alpha.2](#v130-alpha2)
- [Downloads](#downloads-39)
- [Changes since v1.3.0-alpha.1](#changes-since-v130-alpha1)
- [Other notable changes](#other-notable-changes-85)
- [Other notable changes](#other-notable-changes-86)
- [v1.2.2](#v122)
- [Downloads](#downloads-40)
- [Changes since v1.2.1](#changes-since-v121)
- [Other notable changes](#other-notable-changes-86)
- [Other notable changes](#other-notable-changes-87)
- [v1.2.1](#v121)
- [Downloads](#downloads-41)
- [Changes since v1.2.0](#changes-since-v120)
- [Other notable changes](#other-notable-changes-87)
- [Other notable changes](#other-notable-changes-88)
- [v1.3.0-alpha.1](#v130-alpha1)
- [Downloads](#downloads-42)
- [Changes since v1.2.0](#changes-since-v120-1)
- [Action Required](#action-required-26)
- [Other notable changes](#other-notable-changes-88)
- [Other notable changes](#other-notable-changes-89)
- [v1.2.0](#v120)
- [Downloads](#downloads-43)
- [Changes since v1.1.1](#changes-since-v111)
@@ -754,6 +761,69 @@
<!-- NEW RELEASE NOTES ENTRY -->
# v1.6.8
[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.6/examples)
## Downloads for v1.6.8
filename | sha256 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes.tar.gz) | `c87f7826f0b7cf91baddd97ebafb33e99d91dcf6b9019a50bee0689527541ef7`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-src.tar.gz) | `591c43f9624dac351745da35444302cd694ad4953275b8f09016b4654d37b793`
### Client Binaries
filename | sha256 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-client-darwin-386.tar.gz) | `3f6cda6ca2cf3e8f038649f1021ca23c35f4da12d66cefaa4339c9613ca9bbd6`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-client-darwin-amd64.tar.gz) | `147bf5124e44a1557b95e7daa76717992b7890e79910c446dc682103f62325eb`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-client-linux-386.tar.gz) | `cd7238c19f9d4a4ce0b14c2d954f6ead2235caa2d74b319524a0d2ffeea0ca37`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-client-linux-amd64.tar.gz) | `34042be9607ca75702384552b31514f594af22d3c1d88549b0cd4ce36ee8fd6b`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-client-linux-arm64.tar.gz) | `3a7d4be76dda07fac50a257275369b3f4c48848e2963b55b46fa9df44477bfc8`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-client-linux-arm.tar.gz) | `0a060b8745b3c0e8173827af3d91a4748eb191a9c15538625eee108f6024fcfd`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-client-linux-ppc64le.tar.gz) | `bbc7be082d20082179de5efb85c0da9d0f3811c2119d3928bf89edc8f59e8cd0`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-client-linux-s390x.tar.gz) | `5e93d7ed4797f6b8742925d13f791e862bdb410bdd2b33737882132aabcc0bfd`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-client-windows-386.tar.gz) | `22a0a80fa5ed5f0745371cc9fd68eeeb0671242cf7c476fb4e635ccd9ef8c2b1`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-client-windows-amd64.tar.gz) | `ce42d7e826aa07bd98a424332926b04e75effbe926b098565781de3c3b6d244c`
### Server Binaries
filename | sha256 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-server-linux-amd64.tar.gz) | `9bf31375917ffdf9a9437ed562e96a1e2b43e23dcb4a42204032bb289ff12b6d`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-server-linux-arm64.tar.gz) | `51d84e7b1ace983b13639f1fe4bf1b11212d178e6a75b769de9bdac97d1fa7ae`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-server-linux-arm.tar.gz) | `b704de70774c6c0feb13a7b47d8d757e9a0438406b7fd1d33d0c5cb991d179b0`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-server-linux-ppc64le.tar.gz) | `f36f086481656fcb659a456ca832d62274e40defc1a3ed1dcc1e5ea7a696729b`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-server-linux-s390x.tar.gz) | `348f8a733556fcceaaa27d316c3e2ea01039c860988a434d7c9a850bc2412546`
### Node Binaries
filename | sha256 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-node-linux-amd64.tar.gz) | `e38255961c73e021bcca08890918f23cce39831536bf74496aa369049a1eb165`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-node-linux-arm64.tar.gz) | `be06c10320f3f996a48845eef9572353f9a0bd56330338c4cad6aca1fcc4fac4`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-node-linux-arm.tar.gz) | `06c6ecd885fbb4889791e78f50cdcb9920ee8f1e866d4fa921bc2096dbfbbd4b`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-node-linux-ppc64le.tar.gz) | `74e88435549cc46f3fc082300bf373c7d824921bd01eabf789a1b09e1a17a04a`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-node-linux-s390x.tar.gz) | `7ebe22e74653650ac0cedbfc482f5ff08713c40747018dac7506b36bb78ee8fc`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.6.8/kubernetes-node-windows-amd64.tar.gz) | `66b66655976647f50db3eda61849cbb26bcb06ad20a866328f24aef862758bb4`
## Changelog since v1.6.7
### Other notable changes
* Revert deprecation of vCenter port in vSphere Cloud Provider. ([#49689](https://github.com/kubernetes/kubernetes/pull/49689), [@divyenpatel](https://github.com/divyenpatel))
* kubeadm: Add preflight check for localhost resolution. ([#48875](https://github.com/kubernetes/kubernetes/pull/48875), [@craigtracey](https://github.com/craigtracey))
* Fix panic when using `kubeadm init` with vsphere cloud-provider. ([#44661](https://github.com/kubernetes/kubernetes/pull/44661), [@xiangpengzhao](https://github.com/xiangpengzhao))
* kubectl: Fix bug that showed terminated/evicted pods even without `--show-all`. ([#48786](https://github.com/kubernetes/kubernetes/pull/48786), [@janetkuo](https://github.com/janetkuo))
* Never prevent deletion of resources as part of namespace lifecycle ([#48733](https://github.com/kubernetes/kubernetes/pull/48733), [@liggitt](https://github.com/liggitt))
* AWS cloudprovider plugin: Fix for large clusters (200+ nodes). Also fix bug with volumes not getting detached from a node after reboot. ([#48312](https://github.com/kubernetes/kubernetes/pull/48312), [@gnufied](https://github.com/gnufied))
* Fix Pods using Portworx volumes getting stuck in ContainerCreating phase. ([#48898](https://github.com/kubernetes/kubernetes/pull/48898), [@harsh-px](https://github.com/harsh-px))
* RBAC role and role-binding reconciliation now ensures namespaces exist when reconciling on startup. ([#48480](https://github.com/kubernetes/kubernetes/pull/48480), [@liggitt](https://github.com/liggitt))
# v1.7.3
[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.7/examples)
@@ -820,7 +890,11 @@ filename | sha256 hash
* Fix Cinder to support http status 300 in pagination ([#47602](https://github.com/kubernetes/kubernetes/pull/47602), [@rootfs](https://github.com/rootfs))
* Automated cherry pick of [#49079](https://github.com/kubernetes/kubernetes/pull/49079) upstream release 1.7 ([#49254](https://github.com/kubernetes/kubernetes/pull/49254), [@feiskyer](https://github.com/feiskyer))
* Fixed GlusterFS volumes taking too long to time out ([#48709](https://github.com/kubernetes/kubernetes/pull/48709), [@jsafrane](https://github.com/jsafrane))
* The IP address and port for the kube-proxy metrics server are now configurable via the `--metrics-bind-address` flag ([#48625](https://github.com/kubernetes/kubernetes/pull/48625), [@mrhohn](https://github.com/mrhohn))
    * Special notice for kube-proxy in 1.7+ (including 1.7.0); a quick probe of these defaults is sketched after this list:
    * The healthz server (/healthz) will be served on 0.0.0.0:10256 by default.
    * The metrics server (/metrics and /proxyMode) will be served on 127.0.0.1:10249 by default.
    * The metrics server will continue serving /healthz.
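A minimal sketch for checking these defaults, assuming it runs on the node itself and that kube-proxy is using the stock 1.7 ports (10256 for healthz, 10249 for metrics):

```python
# Minimal sketch: probe the default kube-proxy endpoints described above.
# Assumes it runs on the node itself; 10256/10249 are the stock 1.7 ports.
import urllib.request

ENDPOINTS = [
    "http://127.0.0.1:10256/healthz",    # healthz server, bound to 0.0.0.0:10256
    "http://127.0.0.1:10249/metrics",    # metrics server
    "http://127.0.0.1:10249/proxyMode",  # reports the active proxy mode
    "http://127.0.0.1:10249/healthz",    # metrics server keeps serving /healthz
]

for url in ENDPOINTS:
    try:
        with urllib.request.urlopen(url, timeout=2) as resp:
            snippet = resp.read(80).decode("utf-8", "replace").strip()
            print(f"{url} -> {resp.status} {snippet!r}")
    except OSError as exc:
        print(f"{url} -> unreachable ({exc})")
```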
# v1.7.2


@@ -53,7 +53,6 @@ aliases:
- dchen1107
- derekwaynecarr
- dims
- euank
- feiskyer
- mtaufen
- ncdc


@@ -21339,6 +21339,160 @@
}
]
},
"/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale": {
"get": {
"description": "read scale of the specified StatefulSet",
"consumes": [
"*/*"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta1"
],
"operationId": "readAppsV1beta1NamespacedStatefulSetScale",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta1.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "get",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta1"
}
},
"put": {
"description": "replace scale of the specified StatefulSet",
"consumes": [
"*/*"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta1"
],
"operationId": "replaceAppsV1beta1NamespacedStatefulSetScale",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta1.Scale"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta1.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "put",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta1"
}
},
"patch": {
"description": "partially update scale of the specified StatefulSet",
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
"application/strategic-merge-patch+json"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta1"
],
"operationId": "patchAppsV1beta1NamespacedStatefulSetScale",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta1.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "patch",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta1"
}
},
"parameters": [
{
"uniqueItems": true,
"type": "string",
"description": "name of the Scale",
"name": "name",
"in": "path",
"required": true
},
{
"uniqueItems": true,
"type": "string",
"description": "object name and auth scope, such as for teams and projects",
"name": "namespace",
"in": "path",
"required": true
},
{
"uniqueItems": true,
"type": "string",
"description": "If 'true', then the output is pretty printed.",
"name": "pretty",
"in": "query"
}
]
},
"/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status": {
"get": {
"description": "read status of the specified StatefulSet",
@@ -23752,78 +23906,6 @@
}
]
},
"/apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/rollback": {
"post": {
"description": "create rollback of a Deployment",
"consumes": [
"*/*"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta2"
],
"operationId": "createAppsV1beta2NamespacedDeploymentRollback",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.DeploymentRollback"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.DeploymentRollback"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "post",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "DeploymentRollback",
"version": "v1beta2"
}
},
"parameters": [
{
"uniqueItems": true,
"type": "string",
"description": "name of the DeploymentRollback",
"name": "name",
"in": "path",
"required": true
},
{
"uniqueItems": true,
"type": "string",
"description": "object name and auth scope, such as for teams and projects",
"name": "namespace",
"in": "path",
"required": true
},
{
"uniqueItems": true,
"type": "string",
"description": "If 'true', then the output is pretty printed.",
"name": "pretty",
"in": "query"
}
]
},
"/apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/scale": {
"get": {
"description": "read scale of the specified Deployment",
@@ -25360,6 +25442,160 @@
}
]
},
"/apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/scale": {
"get": {
"description": "read scale of the specified StatefulSet",
"consumes": [
"*/*"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta2"
],
"operationId": "readAppsV1beta2NamespacedStatefulSetScale",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "get",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta2"
}
},
"put": {
"description": "replace scale of the specified StatefulSet",
"consumes": [
"*/*"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta2"
],
"operationId": "replaceAppsV1beta2NamespacedStatefulSetScale",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.Scale"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "put",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta2"
}
},
"patch": {
"description": "partially update scale of the specified StatefulSet",
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
"application/strategic-merge-patch+json"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta2"
],
"operationId": "patchAppsV1beta2NamespacedStatefulSetScale",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "patch",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta2"
}
},
"parameters": [
{
"uniqueItems": true,
"type": "string",
"description": "name of the Scale",
"name": "name",
"in": "path",
"required": true
},
{
"uniqueItems": true,
"type": "string",
"description": "object name and auth scope, such as for teams and projects",
"name": "namespace",
"in": "path",
"required": true
},
{
"uniqueItems": true,
"type": "string",
"description": "If 'true', then the output is pretty printed.",
"name": "pretty",
"in": "query"
}
]
},
"/apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/status": {
"get": {
"description": "read status of the specified StatefulSet",
@@ -50956,7 +51192,7 @@
]
},
"io.k8s.api.apps.v1beta1.DeploymentRollback": {
"description": "DeploymentRollback stores the information required to rollback a deployment.",
"description": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"
@@ -51025,7 +51261,7 @@
"format": "int32"
},
"rollbackTo": {
"description": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
"description": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.",
"$ref": "#/definitions/io.k8s.api.apps.v1beta1.RollbackConfig"
},
"selector": {
@@ -51105,6 +51341,7 @@
}
},
"io.k8s.api.apps.v1beta1.RollbackConfig": {
"description": "DEPRECATED.",
"properties": {
"revision": {
"description": "The revision to rollback to. If set to 0, rollback to the last revision.",
@@ -51535,7 +51772,7 @@
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.RollingUpdateDaemonSet"
},
"type": {
"description": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete.",
"description": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.",
"type": "string"
}
}
@@ -51639,45 +51876,6 @@
}
]
},
"io.k8s.api.apps.v1beta2.DeploymentRollback": {
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"
],
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
"type": "string"
},
"kind": {
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
"type": "string"
},
"name": {
"description": "Required: This must match the Name of a deployment.",
"type": "string"
},
"rollbackTo": {
"description": "The config of this deployment rollback.",
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.RollbackConfig"
},
"updatedAnnotations": {
"description": "The annotations to be updated to a deployment",
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"x-kubernetes-group-version-kind": [
{
"group": "apps",
"kind": "DeploymentRollback",
"version": "v1beta2"
}
]
},
"io.k8s.api.apps.v1beta2.DeploymentSpec": {
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentSpec is the specification of the desired behavior of the Deployment.",
"required": [
@@ -51708,10 +51906,6 @@
"type": "integer",
"format": "int32"
},
"rollbackTo": {
"description": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.RollbackConfig"
},
"selector": {
"description": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.",
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"
@@ -51948,16 +52142,6 @@
}
}
},
"io.k8s.api.apps.v1beta2.RollbackConfig": {
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it.",
"properties": {
"revision": {
"description": "The revision to rollback to. If set to 0, rollback to the last revision.",
"type": "integer",
"format": "int64"
}
}
},
"io.k8s.api.apps.v1beta2.RollingUpdateDaemonSet": {
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. Spec to control the desired behavior of daemon set rolling update.",
"properties": {
@@ -51984,7 +52168,7 @@
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
"properties": {
"partition": {
"description": "Partition indicates the ordinal at which the StatefulSet should be partitioned.",
"description": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.",
"type": "integer",
"format": "int32"
}
@@ -52219,7 +52403,7 @@
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.RollingUpdateStatefulSetStrategy"
},
"type": {
"description": "Type indicates the type of the StatefulSetUpdateStrategy.",
"description": "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.",
"type": "string"
}
}
@@ -52593,6 +52777,10 @@
"description": "ResourceAuthorizationAttributes describes information for a resource access request",
"$ref": "#/definitions/io.k8s.api.authorization.v1.ResourceAttributes"
},
"uid": {
"description": "UID information about the requesting user.",
"type": "string"
},
"user": {
"description": "User is the user you're testing for. If you specify \"User\" but not \"Groups\", then is it interpreted as \"What if User were not a member of any groups",
"type": "string"
@@ -52808,6 +52996,10 @@
"description": "ResourceAuthorizationAttributes describes information for a resource access request",
"$ref": "#/definitions/io.k8s.api.authorization.v1beta1.ResourceAttributes"
},
"uid": {
"description": "UID information about the requesting user.",
"type": "string"
},
"user": {
"description": "User is the user you're testing for. If you specify \"User\" but not \"Group\", then is it interpreted as \"What if User were not a member of any groups",
"type": "string"
@@ -58382,7 +58574,7 @@
]
},
"io.k8s.api.extensions.v1beta1.DeploymentRollback": {
"description": "DeploymentRollback stores the information required to rollback a deployment.",
"description": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"
@@ -58451,7 +58643,7 @@
"format": "int32"
},
"rollbackTo": {
"description": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
"description": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.",
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.RollbackConfig"
},
"selector": {
@@ -59194,6 +59386,7 @@
}
},
"io.k8s.api.extensions.v1beta1.RollbackConfig": {
"description": "DEPRECATED.",
"properties": {
"revision": {
"description": "The revision to rollback to. If set to 0, rollback to the last revision.",
@@ -61378,7 +61571,7 @@
"type": "string"
},
"retryAfterSeconds": {
"description": "If specified, the time in seconds before the operation should be retried.",
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.",
"type": "integer",
"format": "int32"
},

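The hunks above add `GET`/`PUT`/`PATCH` handlers for the StatefulSet `scale` subresource under both `apps/v1beta1` and `apps/v1beta2`. A minimal client sketch against the new path, assuming `kubectl proxy` is exposing the API server on `localhost:8001` and using placeholder namespace/StatefulSet names (`default`/`web`):

```python
# Minimal sketch against the new StatefulSet scale subresource (apps/v1beta1).
# Assumes `kubectl proxy` exposes the API server on localhost:8001;
# "default" and "web" are placeholder namespace / StatefulSet names.
import json
import urllib.request

SCALE_URL = ("http://localhost:8001/apis/apps/v1beta1"
             "/namespaces/default/statefulsets/web/scale")

def read_scale():
    """GET .../scale -> a v1beta1.Scale object."""
    with urllib.request.urlopen(SCALE_URL) as resp:
        return json.load(resp)

def replace_scale(scale, replicas):
    """PUT the modified Scale object back (the endpoint consumes */*)."""
    scale["spec"]["replicas"] = replicas
    req = urllib.request.Request(
        SCALE_URL,
        data=json.dumps(scale).encode("utf-8"),
        method="PUT",
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)

if __name__ == "__main__":
    current = read_scale()
    print("current replicas:", current["status"]["replicas"])
    updated = replace_scale(current, 3)
    print("desired replicas:", updated["spec"]["replicas"])
```

The same paths exist under `apps/v1beta2`, and `PATCH` accepts the json-patch, merge-patch, and strategic-merge-patch content types listed in the spec.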

@@ -1546,7 +1546,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},


@@ -2982,6 +2982,171 @@
}
]
},
{
"path": "/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale",
"description": "API at /apis/apps/v1beta1",
"operations": [
{
"type": "v1beta1.Scale",
"method": "GET",
"summary": "read scale of the specified StatefulSet",
"nickname": "readNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta1.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"*/*"
]
},
{
"type": "v1beta1.Scale",
"method": "PUT",
"summary": "replace scale of the specified StatefulSet",
"nickname": "replaceNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "v1beta1.Scale",
"paramType": "body",
"name": "body",
"description": "",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta1.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"*/*"
]
},
{
"type": "v1beta1.Scale",
"method": "PATCH",
"summary": "partially update scale of the specified StatefulSet",
"nickname": "patchNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "v1.Patch",
"paramType": "body",
"name": "body",
"description": "",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta1.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
"application/strategic-merge-patch+json"
]
}
]
},
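These entries add the scale subresource for StatefulSet; a hypothetical way to read it directly from the API (server address, credentials, namespace, and StatefulSet name are placeholders):

```sh
# Read the current scale of a StatefulSet through the new subresource.
curl -sS -H "Authorization: Bearer $TOKEN" \
  "https://$APISERVER/apis/apps/v1beta1/namespaces/default/statefulsets/web/scale"
```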
{
"path": "/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status",
"description": "API at /apis/apps/v1beta1",
@@ -3460,7 +3625,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},
@@ -3645,7 +3810,7 @@
},
"rollbackTo": {
"$ref": "v1beta1.RollbackConfig",
"description": "The config this deployment is rolling back to. Will be cleared after rollback is done."
"description": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done."
},
"progressDeadlineSeconds": {
"type": "integer",
@@ -5689,6 +5854,7 @@
},
"v1beta1.RollbackConfig": {
"id": "v1beta1.RollbackConfig",
"description": "DEPRECATED.",
"properties": {
"revision": {
"type": "integer",
@@ -5781,7 +5947,7 @@
},
"v1beta1.DeploymentRollback": {
"id": "v1beta1.DeploymentRollback",
"description": "DeploymentRollback stores the information required to rollback a deployment.",
"description": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"

View File

@@ -1895,67 +1895,6 @@
}
]
},
{
"path": "/apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/rollback",
"description": "API at /apis/apps/v1beta2",
"operations": [
{
"type": "v1beta2.DeploymentRollback",
"method": "POST",
"summary": "create rollback of a Deployment",
"nickname": "createNamespacedDeploymentRollback",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "v1beta2.DeploymentRollback",
"paramType": "body",
"name": "body",
"description": "",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the DeploymentRollback",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta2.DeploymentRollback"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"*/*"
]
}
]
},
{
"path": "/apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/scale",
"description": "API at /apis/apps/v1beta2",
@@ -4338,6 +4277,171 @@
}
]
},
{
"path": "/apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/scale",
"description": "API at /apis/apps/v1beta2",
"operations": [
{
"type": "v1beta2.Scale",
"method": "GET",
"summary": "read scale of the specified StatefulSet",
"nickname": "readNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta2.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"*/*"
]
},
{
"type": "v1beta2.Scale",
"method": "PUT",
"summary": "replace scale of the specified StatefulSet",
"nickname": "replaceNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "v1beta2.Scale",
"paramType": "body",
"name": "body",
"description": "",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta2.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"*/*"
]
},
{
"type": "v1beta2.Scale",
"method": "PATCH",
"summary": "partially update scale of the specified StatefulSet",
"nickname": "patchNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "v1.Patch",
"paramType": "body",
"name": "body",
"description": "",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta2.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
"application/strategic-merge-patch+json"
]
}
]
},
{
"path": "/apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/status",
"description": "API at /apis/apps/v1beta2",
@@ -4812,7 +4916,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},
@@ -6881,7 +6985,7 @@
"properties": {
"type": {
"type": "string",
"description": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete."
"description": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate."
},
"rollingUpdate": {
"$ref": "v1beta2.RollingUpdateDaemonSet",
@@ -7117,10 +7221,6 @@
"type": "boolean",
"description": "Indicates that the deployment is paused."
},
"rollbackTo": {
"$ref": "v1beta2.RollbackConfig",
"description": "The config this deployment is rolling back to. Will be cleared after rollback is done."
},
"progressDeadlineSeconds": {
"type": "integer",
"format": "int32",
@@ -7156,17 +7256,6 @@
}
}
},
"v1beta2.RollbackConfig": {
"id": "v1beta2.RollbackConfig",
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it.",
"properties": {
"revision": {
"type": "integer",
"format": "int64",
"description": "The revision to rollback to. If set to 0, rollback to the last revision."
}
}
},
"v1beta2.DeploymentStatus": {
"id": "v1beta2.DeploymentStatus",
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentStatus is the most recently observed status of the Deployment.",
@@ -7249,36 +7338,6 @@
}
}
},
"v1beta2.DeploymentRollback": {
"id": "v1beta2.DeploymentRollback",
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"
],
"properties": {
"kind": {
"type": "string",
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
},
"apiVersion": {
"type": "string",
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources"
},
"name": {
"type": "string",
"description": "Required: This must match the Name of a deployment."
},
"updatedAnnotations": {
"type": "object",
"description": "The annotations to be updated to a deployment"
},
"rollbackTo": {
"$ref": "v1beta2.RollbackConfig",
"description": "The config of this deployment rollback."
}
}
},
"v1beta2.Scale": {
"id": "v1beta2.Scale",
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. Scale represents a scaling request for a resource.",
@@ -7671,7 +7730,7 @@
"properties": {
"type": {
"type": "string",
"description": "Type indicates the type of the StatefulSetUpdateStrategy."
"description": "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate."
},
"rollingUpdate": {
"$ref": "v1beta2.RollingUpdateStatefulSetStrategy",
@@ -7686,7 +7745,7 @@
"partition": {
"type": "integer",
"format": "int32",
"description": "Partition indicates the ordinal at which the StatefulSet should be partitioned."
"description": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0."
}
}
},

View File

@@ -336,7 +336,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -336,7 +336,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -434,7 +434,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},
@@ -482,6 +482,10 @@
"extra": {
"type": "object",
"description": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."
},
"uid": {
"type": "string",
"description": "UID information about the requesting user."
}
}
},

View File

@@ -434,7 +434,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},
@@ -482,6 +482,10 @@
"extra": {
"type": "object",
"description": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."
},
"uid": {
"type": "string",
"description": "UID information about the requesting user."
}
}
},

View File

@@ -1343,7 +1343,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -1343,7 +1343,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -1343,7 +1343,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -2369,7 +2369,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -1034,7 +1034,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -6453,7 +6453,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},
@@ -8759,7 +8759,7 @@
},
"rollbackTo": {
"$ref": "v1beta1.RollbackConfig",
"description": "The config this deployment is rolling back to. Will be cleared after rollback is done."
"description": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done."
},
"progressDeadlineSeconds": {
"type": "integer",
@@ -8798,6 +8798,7 @@
},
"v1beta1.RollbackConfig": {
"id": "v1beta1.RollbackConfig",
"description": "DEPRECATED.",
"properties": {
"revision": {
"type": "integer",
@@ -8890,7 +8891,7 @@
},
"v1beta1.DeploymentRollback": {
"id": "v1beta1.DeploymentRollback",
"description": "DeploymentRollback stores the information required to rollback a deployment.",
"description": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"

View File

@@ -1174,7 +1174,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -1340,7 +1340,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -3212,7 +3212,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -3212,7 +3212,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -940,7 +940,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -1172,7 +1172,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -935,7 +935,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -935,7 +935,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -17580,7 +17580,7 @@
"retryAfterSeconds": {
"type": "integer",
"format": "int32",
"description": "If specified, the time in seconds before the operation should be retried."
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."
}
}
},

View File

@@ -5,10 +5,10 @@ Building Kubernetes is easy if you take advantage of the containerized build env
## Requirements
1. Docker, using one of the following configurations:
1. **Mac OS X** You can either use Docker for Mac or docker-machine. See installation instructions [here](https://docs.docker.com/docker-for-mac/).
* **Mac OS X** You can either use Docker for Mac or docker-machine. See installation instructions [here](https://docs.docker.com/docker-for-mac/).
     **Note**: You will want to set the Docker VM to have at least 3GB of initial memory or the build will likely fail. (See: [#11852](http://issue.k8s.io/11852)).
2. **Linux with local Docker** Install Docker according to the [instructions](https://docs.docker.com/installation/#installation) for your OS.
3. **Remote Docker engine** Use a big machine in the cloud to build faster. This is a little trickier so look at the section later on.
* **Linux with local Docker** Install Docker according to the [instructions](https://docs.docker.com/installation/#installation) for your OS.
* **Remote Docker engine** Use a big machine in the cloud to build faster. This is a little trickier so look at the section later on.
2. **Optional** [Google Cloud SDK](https://developers.google.com/cloud/sdk/)
     You must install and configure Google Cloud SDK if you want to upload your release to Google Cloud Storage; you may safely omit it otherwise. Once these requirements are in place, you can run a containerized build as sketched below.
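With the requirements above satisfied, the containerized build is typically driven through the repository's `build/run.sh` wrapper; a minimal sketch (the exact make targets depend on what you want to produce):

```sh
# Build all Kubernetes binaries inside the build container.
build/run.sh make
# Cross-compile for all supported platforms (takes much longer).
build/run.sh make cross
```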

View File

@@ -31,4 +31,6 @@ apt-get clean -y
rm -rf \
/var/cache/debconf/* \
/var/lib/apt/lists/* \
/var/log/*
/var/log/* \
/tmp/* \
/var/tmp/*

View File

@@ -1,18 +1,7 @@
{
"GoPrefix": "k8s.io/kubernetes",
"SrcDirs": [
"./pkg",
"./cmd",
"./third_party",
"./plugin",
"./test",
"./federation",
"./examples"
],
"SkippedPaths": [
"^_.*",
"^staging.*",
"_vendor"
"^_.*"
],
"AddSourcesRules": true,
"VendorMultipleBuildFiles": true,

View File

@@ -63,6 +63,7 @@ filegroup(
"//hack:all-srcs",
"//pkg:all-srcs",
"//plugin:all-srcs",
"//staging:all-srcs",
"//test:all-srcs",
"//third_party:all-srcs",
"//vendor:all-srcs",

View File

@@ -17,6 +17,9 @@ filegroup(
":package-srcs",
"//cluster/addons:all-srcs",
"//cluster/gce:all-srcs",
"//cluster/images/etcd-version-monitor:all-srcs",
"//cluster/images/etcd/attachlease:all-srcs",
"//cluster/images/etcd/rollback:all-srcs",
"//cluster/saltbase:all-srcs",
],
tags = ["automanaged"],

View File

@@ -33,6 +33,9 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//cluster/addons/fluentd-elasticsearch/es-image:all-srcs",
],
tags = ["automanaged"],
)

View File

@@ -64,6 +64,26 @@ spec:
- name: usr-ca-certs
mountPath: /usr/share/ca-certificates
readOnly: true
- name: prom-to-sd
image: gcr.io/google-containers/prometheus-to-sd:v0.2.1
command:
- /monitor
- --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
- --stackdriver-prefix=container.googleapis.com/internal/addons
- --pod-id=$(POD_NAME)
- --namespace-id=$(POD_NAMESPACE)
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- image: gcr.io/google_containers/addon-resizer:2.0
name: heapster-nanny
resources:

View File

@@ -20,7 +20,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.1
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3
resources:
# keep request = limit to keep this container in guaranteed class
limits:

View File

@@ -1,45 +1,82 @@
# Elasticsearch Add-On
This add-on consists of a combination of
[Elasticsearch](https://www.elastic.co/products/elasticsearch), [Fluentd](http://www.fluentd.org/)
and [Kibana](https://www.elastic.co/products/elasticsearch). Elasticsearch is a search engine
that is responsible for storing our logs and allowing for them to be queried. Fluentd sends
log messages from Kubernetes to Elasticsearch, whereas Kibana is a graphical interface for
viewing and querying the logs stored in Elasticsearch.
This add-on consists of a combination of [Elasticsearch][elasticsearch],
[Fluentd][fluentd] and [Kibana][kibana]. Elasticsearch is a search engine
that is responsible for storing our logs and allowing them to be queried.
Fluentd sends log messages from Kubernetes to Elasticsearch, whereas Kibana
is a graphical interface for viewing and querying the logs stored in
Elasticsearch.
**Note:** this addon should **not** be used as-is in production. This is
an example and you should treat it as such. Please see at least the
[Security](#security) and the [Storage](#storage) sections for more
information.
## Elasticsearch
Elasticsearch is deployed as a
[StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), which
is like a Deployment, but allows for maintaining state on storage volumes.
### Authentication
Elasticsearch has basic authentication enabled by default, in our configuration the credentials
are at their default values, i.e. username 'elastic' and password 'changeme'. In order to change
them, please read up on [the official documentation](https://www.elastic.co/guide/en/x-pack/current/setting-up-authentication.html#reset-built-in-user-passwords).
Elasticsearch is deployed as a [StatefulSet][statefulSet], which is like
a Deployment, but allows for maintaining state on storage volumes.
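Once the manifests in this directory are applied, a quick way to confirm that Elasticsearch came up is to query the StatefulSet (the name, namespace, and label below are the ones used by the manifests in this directory):

```sh
kubectl get statefulset elasticsearch-logging --namespace kube-system
kubectl get pods --namespace kube-system -l k8s-app=elasticsearch-logging
```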
### Security
Elasticsearch can enable authorization using the [X-Pack plugin][xPack].
See the configuration parameter `xpack.security.enabled` in the
Elasticsearch and Kibana configurations. It can also be set via the
`XPACK_SECURITY_ENABLED` env variable. After enabling the feature,
follow the [official documentation][setupCreds] to set up credentials in
Elasticsearch and Kibana. Don't forget to also propagate those credentials
to Fluentd in its [configuration][fluentdCreds], for example via
[environment variables][fluentdEnvVar]. You can use [ConfigMaps][configMap]
and [Secrets][secret] to store the credentials in the Kubernetes apiserver.
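A minimal sketch of handling the credentials with a Secret (the Secret name and keys here are illustrative, not something the add-on expects):

```sh
# Store the Elasticsearch credentials in a Secret in the add-on's namespace;
# 'elastic' is the default X-Pack user.
kubectl create secret generic es-credentials --namespace kube-system \
  --from-literal=username=elastic \
  --from-literal=password='change-me'
```

The containers can then reference these values through `secretKeyRef` entries in their env sections.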
### Initialization
The Elasticsearch StatefulSet manifest specifies an
[init container](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) executing
before Elasticsearch containers themselves, in order to ensure that the kernel state variable
`vm.max_map_count` is at least 262144, since this is a requirement of Elasticsearch.
You may remove the init container if you know that your host OS meets this requirement.
[init container][initContainer] executing before Elasticsearch containers
themselves, in order to ensure that the kernel state variable
`vm.max_map_count` is at least 262144, since this is a requirement of
Elasticsearch. You may remove the init container if you know that your host
OS meets this requirement.
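If you prefer to prepare the nodes yourself, a quick check on each host might look like this (assumes a Linux node with `sysctl` available):

```sh
# Elasticsearch needs vm.max_map_count >= 262144.
sysctl vm.max_map_count
# Raise it if necessary; this is exactly what the init container does.
sudo sysctl -w vm.max_map_count=262144
```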
### Storage
The Elasticsearch StatefulSet will claim a storage volume 'elasticsearch-logging',
of the standard
[StorageClass](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#storageclasses),
that by default will be 100 Gi per replica. Please adjust this to your needs (including
possibly choosing a more suitable StorageClass).
The Elasticsearch StatefulSet uses an [emptyDir][emptyDir] volume to
store data. An emptyDir volume is erased when the pod terminates, so here it
is used only for testing purposes. **Important:** please switch the storage
to a persistent volume claim before actually using this StatefulSet in your
setup!
## Fluentd
Fluentd is deployed as a
[DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) which spawns a
pod on each node that reads logs, generated by kubelet, container runtime and containers and
sends them to Elasticsearch.
*Please note that for Fluentd to work, every Kubernetes node must be labeled*
`beta.kubernetes.io/fluentd-ds-ready=true`, as otherwise Fluentd will ignore them.
Fluentd is deployed as a [DaemonSet][daemonSet] which spawns a pod on each
node that reads the logs generated by the kubelet, the container runtime,
and the containers, and sends them to Elasticsearch.
Learn more at: https://kubernetes.io/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana
**Note:** in order for Fluentd to work, every Kubernetes node must be labeled
with `beta.kubernetes.io/fluentd-ds-ready=true`, as otherwise the Fluentd
DaemonSet will ignore them.
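For example, a node can be labeled with a command along these lines (the node name is a placeholder):

```sh
kubectl label node <node-name> beta.kubernetes.io/fluentd-ds-ready=true
```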
Learn more in the [official Kubernetes documentation][k8sElasticsearchDocs].
### Known problems
Since Fluentd talks to the Elasticsearch service inside the cluster, Fluentd
instances running on master nodes won't work, because masters have no
kube-proxy. Either don't mark masters with the label mentioned in the
previous paragraph, or add a taint to them so that Fluentd pods are not
scheduled there.
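A sketch of the taint approach (the taint key and value are illustrative; any NoSchedule taint that the Fluentd pods do not tolerate will do):

```sh
kubectl taint nodes <master-name> dedicated=master:NoSchedule
```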
[fluentd]: http://www.fluentd.org/
[elasticsearch]: https://www.elastic.co/products/elasticsearch
[kibana]: https://www.elastic.co/products/kibana
[xPack]: https://www.elastic.co/products/x-pack
[setupCreds]: https://www.elastic.co/guide/en/x-pack/current/setting-up-authentication.html#reset-built-in-user-passwords
[fluentdCreds]: https://github.com/uken/fluent-plugin-elasticsearch#user-password-path-scheme-ssl_verify
[fluentdEnvVar]: https://docs.fluentd.org/v0.12/articles/faq#how-can-i-use-environment-variables-to-configure-parameters-dynamically
[configMap]: https://kubernetes.io/docs/tasks/configure-pod-container/configmap/
[secret]: https://kubernetes.io/docs/concepts/configuration/secret/
[statefulSet]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset
[initContainer]: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
[emptyDir]: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
[daemonSet]: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
[k8sElasticsearchDocs]: https://kubernetes.io/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/fluentd-elasticsearch/README.md?pixel)]()

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: environment
namespace: kube-system
data:
elasticsearch-user: elastic

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: environment
namespace: kube-system
type: Opaque
data:
elasticsearch-password: Y2hhbmdlbWU=

View File

@@ -1,17 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: elasticsearch-logging
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- "services"
- "namespaces"
- "endpoints"
verbs:
- "get"

View File

@@ -1,18 +0,0 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
namespace: kube-system
name: elasticsearch-logging
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: elasticsearch-logging
namespace: kube-system
apiGroup: ""
roleRef:
kind: ClusterRole
name: elasticsearch-logging
apiGroup: ""

View File

@@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "es-image",
library = ":go_default_library",
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["elasticsearch_logging_discovery.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -14,22 +14,12 @@
FROM docker.elastic.co/elasticsearch/elasticsearch:5.5.1
USER root
RUN mkdir /data
RUN chown -R elasticsearch:elasticsearch /data
WORKDIR /usr/share/elasticsearch
VOLUME ["/data"]
EXPOSE 9200 9300
USER elasticsearch
COPY elasticsearch_logging_discovery bin/
COPY config/elasticsearch.yml config/
COPY config/log4j2.properties config/
COPY run.sh bin/
COPY elasticsearch_logging_discovery run.sh bin/
COPY config/elasticsearch.yml config/log4j2.properties config/
USER root
RUN chown -R elasticsearch:elasticsearch config
RUN chown -R elasticsearch:elasticsearch ./
CMD ["bin/run.sh"]

View File

@@ -12,19 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: elasticsearch_logging_discovery build push
.PHONY: binary build push
# The current value of the tag to be used for building and
# pushing an image to gcr.io
TAG = v5.5.1
PREFIX = gcr.io/google-containers
IMAGE = elasticsearch
TAG = v5.5.1-1
build: elasticsearch_logging_discovery
docker build --pull -t gcr.io/google_containers/elasticsearch:$(TAG) .
build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
push:
gcloud docker -- push gcr.io/google_containers/elasticsearch:$(TAG)
gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG)
elasticsearch_logging_discovery:
binary:
CGO_ENABLED=0 GOOS=linux go build -a -ldflags "-w" elasticsearch_logging_discovery.go
clean:

View File

@@ -12,3 +12,6 @@ path.data: /data
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: ${MINIMUM_MASTER_NODES}
xpack.security.enabled: false
xpack.monitoring.enabled: false

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: v1
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile

View File

@@ -1,11 +1,60 @@
apiVersion: apps/v1beta1
kind: StatefulSet
# RBAC authn and authz
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch-logging-v1
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: v1
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: elasticsearch-logging
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- "services"
- "namespaces"
- "endpoints"
verbs:
- "get"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
namespace: kube-system
name: elasticsearch-logging
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: elasticsearch-logging
namespace: kube-system
apiGroup: ""
roleRef:
kind: ClusterRole
name: elasticsearch-logging
apiGroup: ""
---
# Elasticsearch deployment itself
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: v5.5.1
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
@@ -14,17 +63,17 @@ spec:
selector:
matchLabels:
k8s-app: elasticsearch-logging
version: v1
version: v5.5.1
template:
metadata:
labels:
k8s-app: elasticsearch-logging
version: v1
version: v5.5.1
kubernetes.io/cluster-service: "true"
spec:
serviceAccountName: elasticsearch-logging
containers:
- image: gcr.io/google_containers/elasticsearch:v5.5.1
- image: gcr.io/google-containers/elasticsearch:v5.5.1-1
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class
@@ -47,17 +96,15 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: elasticsearch-logging
emptyDir: {}
# Elasticsearch requires vm.max_map_count to be at least 262144.
# If your OS already sets up this number to a higher value, feel free
# to remove this init container.
initContainers:
- image: alpine:3.6
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
name: elasticsearch-logging-init
securityContext:
privileged: true
volumeClaimTemplates:
- metadata:
name: elasticsearch-logging
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 100Gi

View File

@@ -1,18 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: fluentd-es
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- "namespaces"
- "pods"
verbs:
- "get"
- "watch"
- "list"

View File

@@ -1,17 +0,0 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: fluentd-es
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: fluentd-es
namespace: kube-system
apiGroup: ""
roleRef:
kind: ClusterRole
name: fluentd-es
apiGroup: ""

View File

@@ -0,0 +1,362 @@
kind: ConfigMap
apiVersion: v1
data:
containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
# to the docker logs for pods in the /var/log/containers directory on the host.
# If running this fluentd configuration in a Docker container, the /var/log
# directory should be mounted in the container.
#
# These logs are then submitted to Elasticsearch which assumes the
# installation of the fluent-plugin-elasticsearch & the
# fluent-plugin-kubernetes_metadata_filter plugins.
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
#
# Example
# =======
# A line in the Docker log file might look like this JSON:
#
# {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z"}
#
# The time_format specification below makes sure we properly
# parse the time format produced by Docker. This will be
# submitted to Elasticsearch and should appear like:
# $ curl 'http://elasticsearch-logging:9200/_search?pretty'
# ...
# {
# "_index" : "logstash-2014.09.25",
# "_type" : "fluentd",
# "_id" : "VBrbor2QTuGpsQyTCdfzqA",
# "_score" : 1.0,
# "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
# "stream":"stderr","tag":"docker.container.all",
# "@timestamp":"2014-09-25T22:45:50+00:00"}
# },
# ...
#
# The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
# record & add labels to the log record if properly configured. This enables users
# to filter & search logs on any metadata.
# For example a Docker container's logs might be in the directory:
#
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
#
# and in the file:
#
# 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# where 997599971ee6... is the Docker ID of the running container.
# The Kubernetes kubelet makes a symbolic link to this file on the host machine
# in the /var/log/containers directory which includes the pod name and the Kubernetes
# container name:
#
# synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# ->
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# The /var/log directory on the host is mapped to the /var/log directory in the container
# running this instance of Fluentd and we end up collecting the file:
#
# /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# This results in the tag:
#
# var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
# which are added to the log message as a kubernetes field object & the Docker container ID
# is also added under the docker field object.
# The final tag is:
#
# kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
    # And the final log record looks like:
#
# {
# "log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z",
# "kubernetes": {
# "namespace": "default",
# "pod_name": "synthetic-logger-0.25lps-pod",
# "container_name": "synth-lgr"
# },
# "docker": {
# "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
# }
# }
#
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
    # Kubernetes pod has been restarted (resulting in several Docker container IDs).
# Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
<source>
type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag kubernetes.*
format json
read_from_head true
</source>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/es-salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
tag startupscript
</source>
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
<source>
type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/es-docker.log.pos
tag docker
</source>
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
path /var/log/etcd.log
pos_file /var/log/es-etcd.log.pos
tag etcd
</source>
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kubelet.log
pos_file /var/log/es-kubelet.log.pos
tag kubelet
</source>
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-proxy.log
pos_file /var/log/es-kube-proxy.log.pos
tag kube-proxy
</source>
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-apiserver.log
pos_file /var/log/es-kube-apiserver.log.pos
tag kube-apiserver
</source>
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-controller-manager.log
pos_file /var/log/es-kube-controller-manager.log.pos
tag kube-controller-manager
</source>
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-scheduler.log
pos_file /var/log/es-kube-scheduler.log.pos
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/glbc.log
pos_file /var/log/es-glbc.log.pos
tag glbc
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/cluster-autoscaler.log
pos_file /var/log/es-cluster-autoscaler.log.pos
tag cluster-autoscaler
</source>
# Logs from systemd-journal for interesting services.
<source>
type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
pos_file /var/log/gcp-journald-docker.pos
read_from_head true
tag docker
</source>
<source>
type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
pos_file /var/log/gcp-journald-kubelet.pos
read_from_head true
tag kubelet
</source>
<source>
type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
pos_file /var/log/gcp-journald-node-problem-detector.pos
read_from_head true
tag node-problem-detector
</source>
forward.input.conf: |-
# Takes the messages sent over TCP
<source>
type forward
</source>
monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
<source>
@type prometheus
</source>
<source>
@type monitor_agent
</source>
# input plugin that collects metrics from MonitorAgent
<source>
@type prometheus_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for output plugin
<source>
@type prometheus_output_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for in_tail plugin
<source>
@type prometheus_tail_monitor
<labels>
host ${hostname}
</labels>
</source>
output.conf: |-
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
type kubernetes_metadata
</filter>
<match **>
type elasticsearch
log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
# Set the chunk limits.
buffer_chunk_limit 2M
buffer_queue_limit 8
flush_interval 5s
# Never wait longer than 5 minutes between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 2
</match>
metadata:
name: fluentd-es-config-v0.1.0
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile

View File

@@ -1,20 +1,67 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd-es-v1.24
name: fluentd-es
namespace: kube-system
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.24
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: fluentd-es
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- "namespaces"
- "pods"
verbs:
- "get"
- "watch"
- "list"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: fluentd-es
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: fluentd-es
namespace: kube-system
apiGroup: ""
roleRef:
kind: ClusterRole
name: fluentd-es
apiGroup: ""
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd-es
namespace: kube-system
labels:
k8s-app: fluentd-es
version: v2.0.0
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: v1.24
version: v2.0.0
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
@@ -24,27 +71,13 @@ spec:
serviceAccountName: fluentd-es
containers:
- name: fluentd-es
image: gcr.io/google_containers/fluentd-elasticsearch:1.24
command:
- '/bin/sh'
- '-c'
- '/usr/sbin/td-agent $FLUENTD_ARGS'
image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.0
env:
- name: FLUENTD_ARGS
value: -q
- name: FLUENT_ELASTICSEARCH_USER
valueFrom:
configMapKeyRef:
name: environment
key: elasticsearch-user
- name: FLUENT_ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: environment
key: elasticsearch-password
value: --no-supervisor -q
resources:
limits:
memory: 200Mi
memory: 500Mi
requests:
cpu: 100m
memory: 200Mi
@@ -54,6 +87,11 @@ spec:
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: libsystemddir
mountPath: /host/lib
readOnly: true
- name: config-volume
mountPath: /etc/fluent/config.d
nodeSelector:
beta.kubernetes.io/fluentd-ds-ready: "true"
terminationGracePeriodSeconds: 30
@@ -64,3 +102,10 @@ spec:
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
      # The host's systemd library is needed to decompress journals.
- name: libsystemddir
hostPath:
path: /usr/lib64
- name: config-volume
configMap:
name: fluentd-es-config-v0.1.0

View File

@@ -15,28 +15,45 @@
# This Dockerfile will build an image that is configured
# to run Fluentd with an Elasticsearch plug-in and the
# provided configuration file.
# TODO(a-robinson): Use a lighter base image, e.g. some form of busybox.
# The image acts as an executable for the binary /usr/sbin/td-agent.
# Note that fluentd is run with root permission to allow access to
# log files with root-only access under /var/log/containers/*
# Please see http://docs.fluentd.org/articles/install-by-deb for more
# information about installing fluentd using deb package.
FROM gcr.io/google_containers/ubuntu-slim:0.6
FROM gcr.io/google-containers/debian-base-amd64:0.1
# Ensure there are enough file descriptors for running Fluentd.
RUN ulimit -n 65536
COPY Gemfile /Gemfile
# Disable prompts from apt.
ENV DEBIAN_FRONTEND noninteractive
# 1. Install & configure dependencies.
# 2. Install fluentd via ruby.
# 3. Remove build dependencies.
# 4. Cleanup leftover caches & files.
RUN BUILD_DEPS="make gcc g++ libc6-dev ruby-dev" \
&& clean-install $BUILD_DEPS \
ca-certificates \
libjemalloc1 \
liblz4-1 \
ruby \
&& echo 'gem: --no-document' >> /etc/gemrc \
&& gem install --file Gemfile \
&& apt-get purge -y --auto-remove \
-o APT::AutoRemove::RecommendsImportant=false \
$BUILD_DEPS \
&& rm -rf /tmp/* \
/var/lib/apt/lists/* \
/usr/lib/ruby/gems/*/cache/*.gem \
/var/log/* \
/var/tmp/* \
# Ensure fluent has enough file descriptors
&& ulimit -n 65536
# Copy the Fluentd configuration file.
COPY td-agent.conf /etc/td-agent/td-agent.conf
# Copy the Fluentd configuration file for logging Docker container logs.
COPY fluent.conf /etc/fluent/fluent.conf
COPY run.sh /run.sh
COPY build.sh /tmp/build.sh
RUN /tmp/build.sh
# Expose prometheus metrics.
EXPOSE 80
ENV LD_PRELOAD /opt/td-agent/embedded/lib/libjemalloc.so
ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
# Run the Fluentd service.
ENTRYPOINT ["td-agent"]
# Start Fluentd to pick up our config that watches Docker container logs.
CMD /run.sh $FLUENTD_ARGS

View File

@@ -0,0 +1,9 @@
source 'https://rubygems.org'
gem 'fluentd', '~>0.12.32'
gem 'activesupport', '~>4.2.6'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>0.27.0'
gem 'fluent-plugin-elasticsearch', '~>1.9.5'
gem 'fluent-plugin-systemd', '~>0.0.8'
gem 'fluent-plugin-prometheus', '~>0.3.0'
gem 'oj', '~>2.18.1'

View File

@@ -14,12 +14,12 @@
.PHONY: build push
PREFIX = gcr.io/google_containers
PREFIX = gcr.io/google-containers
IMAGE = fluentd-elasticsearch
TAG = 1.24
TAG = v2.0.0
build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
push:
gcloud docker --server=gcr.io -- push $(PREFIX)/$(IMAGE):$(TAG)
gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG)

View File

@@ -1,10 +1,14 @@
# Collecting Docker Log Files with Fluentd and Elasticsearch
This directory contains the source files needed to make a Docker image
that collects Docker container log files using [Fluentd](http://www.fluentd.org/)
and sends them to an instance of [Elasticsearch](http://www.elasticsearch.org/).
This image is designed to be used as part of the [Kubernetes](https://github.com/kubernetes/kubernetes)
cluster bring up process. The image resides at DockerHub under the name
[kubernetes/fluentd-elasticsearch](https://registry.hub.docker.com/u/kubernetes/fluentd-elasticsearch/).
that collects Docker container log files using [Fluentd][fluentd]
and sends them to an instance of [Elasticsearch][elasticsearch].
This image is designed to be used as part of the [Kubernetes][kubernetes]
cluster bring up process. The image resides at GCR under the name
[gcr.io/google-containers/fluentd-elasticsearch][image].
[fluentd]: http://www.fluentd.org/
[elasticsearch]: https://www.elastic.co/products/elasticsearch
[kubernetes]: https://kubernetes.io
[image]: https://gcr.io/google-containers/fluentd-elasticsearch
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md?pixel)]()

View File

@@ -1,48 +0,0 @@
#!/bin/sh
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Install prerequisites.
apt-get update
apt-get install -y -q --no-install-recommends \
curl ca-certificates make g++ sudo bash
# Install Fluentd.
/usr/bin/curl -sSL https://toolbelt.treasuredata.com/sh/install-ubuntu-xenial-td-agent2.sh | sh
# Change the default user and group to root.
# Needed to allow access to /var/log/docker/... files.
sed -i -e "s/USER=td-agent/USER=root/" -e "s/GROUP=td-agent/GROUP=root/" /etc/init.d/td-agent
# Install the Elasticsearch Fluentd plug-in.
# http://docs.fluentd.org/articles/plugin-management
td-agent-gem install --no-document fluent-plugin-kubernetes_metadata_filter -v 0.27.0
td-agent-gem install --no-document fluent-plugin-elasticsearch -v 1.9.5
td-agent-gem install --no-document fluent-plugin-prometheus -v 0.3.0
# Remove docs and postgres references
rm -rf /opt/td-agent/embedded/share/doc \
/opt/td-agent/embedded/share/gtk-doc \
/opt/td-agent/embedded/lib/postgresql \
/opt/td-agent/embedded/bin/postgres \
/opt/td-agent/embedded/share/postgresql
apt-get remove -y make g++
apt-get autoremove -y
apt-get clean -y
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

View File

@@ -0,0 +1,8 @@
# This is the root config file, which only includes components of the actual configuration
# Do not collect fluentd's own logs to avoid infinite loops.
<match fluent.**>
type null
</match>
@include /etc/fluent/config.d/*.conf

View File

@@ -0,0 +1,29 @@
#!/bin/sh
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# These steps must be executed once the host /var and /lib volumes have
# been mounted, and therefore cannot be done in the docker build stage.
# For systems without journald
mkdir -p /var/log/journal
# Copy host libsystemd into image to avoid compatibility issues.
if [ ! -z "$(ls /host/lib/libsystemd* 2>/dev/null)" ]; then
rm /lib/x86_64-linux-gnu/libsystemd*
cp -a /host/lib/libsystemd* /lib/x86_64-linux-gnu/
fi
/usr/local/bin/fluentd "$@"
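For a quick smoke test of run.sh outside the cluster, the image can be run directly; this is only an illustration, assuming the host's /usr/lib64 holds the systemd libraries that the DaemonSet's libsystemddir volume is meant to expose at /host/lib:

```sh
# Illustrative local run only; in a cluster the DaemonSet provides these mounts.
docker run --rm \
  -v /var/log:/var/log \
  -v /var/lib/docker/containers:/var/lib/docker/containers:ro \
  -v /usr/lib64:/host/lib:ro \
  gcr.io/google-containers/fluentd-elasticsearch:v2.0.0
```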

View File

@@ -1,344 +0,0 @@
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
# to the docker logs for pods in the /var/log/containers directory on the host.
# If running this fluentd configuration in a Docker container, the /var/log
# directory should be mounted in the container.
#
# These logs are then submitted to Elasticsearch which assumes the
# installation of the fluent-plugin-elasticsearch & the
# fluent-plugin-kubernetes_metadata_filter plugins.
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
# Maintainer: Jimmi Dyson <jimmidyson@gmail.com>
#
# Example
# =======
# A line in the Docker log file might look like this JSON:
#
# {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z"}
#
# The time_format specification below makes sure we properly
# parse the time format produced by Docker. This will be
# submitted to Elasticsearch and should appear like:
# $ curl 'http://elasticsearch-logging:9200/_search?pretty'
# ...
# {
# "_index" : "logstash-2014.09.25",
# "_type" : "fluentd",
# "_id" : "VBrbor2QTuGpsQyTCdfzqA",
# "_score" : 1.0,
# "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
# "stream":"stderr","tag":"docker.container.all",
# "@timestamp":"2014-09-25T22:45:50+00:00"}
# },
# ...
#
# The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
# record & add labels to the log record if properly configured. This enables users
# to filter & search logs on any metadata.
# For example a Docker container's logs might be in the directory:
#
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
#
# and in the file:
#
# 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# where 997599971ee6... is the Docker ID of the running container.
# The Kubernetes kubelet makes a symbolic link to this file on the host machine
# in the /var/log/containers directory which includes the pod name and the Kubernetes
# container name:
#
# synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# ->
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# The /var/log directory on the host is mapped to the /var/log directory in the container
# running this instance of Fluentd and we end up collecting the file:
#
# /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# This results in the tag:
#
# var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
# which are added to the log message as a kubernetes field object & the Docker container ID
# is also added under the docker field object.
# The final tag is:
#
# kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# And the final log record looks like:
#
# {
# "log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z",
# "kubernetes": {
# "namespace": "default",
# "pod_name": "synthetic-logger-0.25lps-pod",
# "container_name": "synth-lgr"
# },
# "docker": {
# "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
# }
# }
#
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
# Kubernetes pod has been restarted (resulting in several Docker container IDs).
#
# TODO: Propagate the labels associated with a container along with its logs
# so users can query logs using labels as well as or instead of the pod name
# and container name. This can be done simply by configuring the Kubernetes
# fluentd plugin, but it requires secrets to be enabled in the fluentd pod. This is a
# problem yet to be solved, because secrets are not usable in static pods, which the fluentd
# pod must remain until a per-node controller is available in Kubernetes.
# Prevent fluentd from handling records containing its own logs. Otherwise
# it can lead to an infinite loop, where an error in sending one message generates
# another message that also fails to be sent, and so on.
<match fluent.**>
type null
</match>
# Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
<source>
type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag kubernetes.*
format json
read_from_head true
</source>
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/es-salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
tag startupscript
</source>
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
<source>
type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/es-docker.log.pos
tag docker
</source>
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
path /var/log/etcd.log
pos_file /var/log/es-etcd.log.pos
tag etcd
</source>
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kubelet.log
pos_file /var/log/es-kubelet.log.pos
tag kubelet
</source>
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-proxy.log
pos_file /var/log/es-kube-proxy.log.pos
tag kube-proxy
</source>
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-apiserver.log
pos_file /var/log/es-kube-apiserver.log.pos
tag kube-apiserver
</source>
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-controller-manager.log
pos_file /var/log/es-kube-controller-manager.log.pos
tag kube-controller-manager
</source>
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-scheduler.log
pos_file /var/log/es-kube-scheduler.log.pos
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/glbc.log
pos_file /var/log/es-glbc.log.pos
tag glbc
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/cluster-autoscaler.log
pos_file /var/log/es-cluster-autoscaler.log.pos
tag cluster-autoscaler
</source>
<filter kubernetes.**>
type kubernetes_metadata
</filter>
# Prometheus Exporter Plugin
# input plugin that exports metrics
<source>
type prometheus
</source>
<source>
type monitor_agent
</source>
<source>
type forward
</source>
# input plugin that collects metrics from MonitorAgent
<source>
@type prometheus_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for output plugin
<source>
@type prometheus_output_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for in_tail plugin
<source>
@type prometheus_tail_monitor
<labels>
host ${hostname}
</labels>
</source>
<match **>
type elasticsearch
user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
# Set the chunk limit the same as for fluentd-gcp.
buffer_chunk_limit 2M
# Cap buffer memory usage to 2MiB/chunk * 32 chunks = 64 MiB
buffer_queue_limit 32
flush_interval 5s
# Never wait longer than 30 seconds between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 8
</match>

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd-es
namespace: kube-system
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile

View File

@@ -27,10 +27,14 @@ spec:
requests:
cpu: 100m
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:9200"
- name: "SERVER_BASEPATH"
value: "/api/v1/proxy/namespaces/kube-system/services/kibana-logging"
- name: ELASTICSEARCH_URL
value: http://elasticsearch-logging:9200
- name: SERVER_BASEPATH
value: /api/v1/proxy/namespaces/kube-system/services/kibana-logging
- name: XPACK_MONITORING_ENABLED
value: "false"
- name: XPACK_SECURITY_ENABLED
value: "false"
ports:
- containerPort: 5601
name: ui

View File

@@ -28,15 +28,6 @@ spec:
containers:
- name: fluentd-gcp
image: gcr.io/google-containers/fluentd-gcp:2.0.8
# If fluentd consumes its own logs, the following situation may happen:
# fluentd fails to send a chunk to the server => writes it to the log =>
# tries to send this message to the server => fails to send a chunk and so on.
# Writing to a file that is not exported to the back-end prevents this.
# It also allows increasing the fluentd verbosity by default.
command:
- '/bin/sh'
- '-c'
- '/run.sh $FLUENTD_ARGS'
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q

View File

@@ -9,7 +9,7 @@ pkg_tar(
"container-linux/configure-helper.sh",
"gci/configure-helper.sh",
"gci/health-monitor.sh",
"gci/mounter/mounter",
"//cluster/gce/gci/mounter",
],
mode = "0755",
strip_prefix = ".",
@@ -32,7 +32,10 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//cluster/gce/gci/mounter:all-srcs",
],
tags = ["automanaged"],
)

View File

@@ -285,3 +285,10 @@ fi
if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
fi
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST"
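Since these are plain `${VAR:-default}` assignments exported through PROVIDER_VARS, the fluentd-gcp resource envelope can be overridden from the environment at cluster bring-up; a sketch with illustrative values:

```sh
# Override the fluentd-gcp resource defaults before bringing the cluster up.
# The values below are examples, not recommendations.
export FLUENTD_GCP_MEMORY_LIMIT=500Mi
export FLUENTD_GCP_CPU_REQUEST=200m
export FLUENTD_GCP_MEMORY_REQUEST=300Mi
./cluster/kube-up.sh
```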

View File

@@ -152,6 +152,16 @@ CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CL
SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
# TODO: change this and flex e2e test when default flex volume install path is changed for GCI
# Set flex dir to one that's readable from controller-manager container and writable by the flex e2e test.
if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
CONTROLLER_MANAGER_TEST_VOLUME_PLUGIN_DIR="--flex-volume-plugin-dir=/etc/srv/kubernetes/kubelet-plugins/volume/exec"
fi
# Set flex dir to one that's readable from kubelet and writable by the flex e2e test.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || ([[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] && [[ "${REGISTER_MASTER_KUBELET}" == "false" ]]); then
KUBELET_TEST_VOLUME_PLUGIN_DIR="--volume-plugin-dir=/etc/srv/kubernetes/kubelet-plugins/volume/exec"
fi
TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=1}"
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}"
TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m}"
@@ -159,7 +169,7 @@ TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m
# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --max-pods=110 --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --max-pods=110 --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBELET_TEST_VOLUME_PLUGIN_DIR:-}"
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
NODE_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
fi
@@ -167,7 +177,7 @@ if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}"
MASTER_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
fi
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE} ${CONTROLLER_MANAGER_TEST_VOLUME_PLUGIN_DIR:-}"
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
@@ -334,3 +344,10 @@ fi
if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
fi
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST"

View File

@@ -1155,7 +1155,7 @@ function start-cluster-autoscaler {
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
remove-salt-config-comments "${src_file}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:-}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
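The change above makes `--expander=price` the default whenever AUTOSCALER_EXPANDER_CONFIG is unset; to keep a different strategy, the variable can still be set explicitly before start-up, for example:

```sh
# Keep the cluster autoscaler on a non-default expander strategy.
# "random" is one of the upstream cluster-autoscaler expanders.
export AUTOSCALER_EXPANDER_CONFIG="--expander=random"
```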

View File

@@ -1558,7 +1558,7 @@ function start-cluster-autoscaler {
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
remove-salt-config-comments "${src_file}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:-}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
@@ -1596,6 +1596,26 @@ function setup-addon-manifests {
chmod 644 "${dst_dir}"/*
}
# Fluentd manifest is modified using kubectl, which may not be available at
# this point. Run this as a background process.
function wait-for-apiserver-and-update-fluentd {
until kubectl get nodes
do
sleep 10
done
kubectl set resources --dry-run --local -f ${fluentd_gcp_yaml} \
--limits=memory=${FLUENTD_GCP_MEMORY_LIMIT} \
--requests=cpu=${FLUENTD_GCP_CPU_REQUEST},memory=${FLUENTD_GCP_MEMORY_REQUEST} \
-o yaml > ${fluentd_gcp_yaml}.tmp
mv ${fluentd_gcp_yaml}.tmp ${fluentd_gcp_yaml}
}
# Trigger background process that will ultimately update fluentd resource
# requirements.
function start-fluentd-resource-update {
wait-for-apiserver-and-update-fluentd &
}
# Prepares the manifests of k8s addons, and starts the addon manager.
# Vars assumed:
# CLUSTER_NAME
@@ -1679,6 +1699,8 @@ function start-kube-addons {
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
setup-addon-manifests "addons" "fluentd-gcp"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
start-fluentd-resource-update
fi
if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
setup-addon-manifests "addons" "dashboard"
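With the defaults from the config scripts above substituted in, the background update amounts to the following kubectl invocation; the addon path shown is an assumption based on where setup-addon-manifests normally places manifests:

```sh
# What wait-for-apiserver-and-update-fluentd effectively runs once the
# apiserver answers, with the default FLUENTD_GCP_* values filled in.
fluentd_gcp_yaml=/etc/kubernetes/addons/fluentd-gcp/fluentd-gcp-ds.yaml  # assumed location
kubectl set resources --dry-run --local -f "${fluentd_gcp_yaml}" \
  --limits=memory=300Mi \
  --requests=cpu=100m,memory=200Mi \
  -o yaml > "${fluentd_gcp_yaml}.tmp"
mv "${fluentd_gcp_yaml}.tmp" "${fluentd_gcp_yaml}"
```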

View File

@@ -0,0 +1,34 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "mounter",
library = ":go_default_library",
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["mounter.go"],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -953,6 +953,7 @@ function delete-subnetworks() {
#
# Assumed vars:
# KUBE_TEMP: temporary directory
# NUM_NODES: #nodes in the cluster
#
# Args:
# $1: host name
@@ -1044,7 +1045,13 @@ function create-master() {
create-certs "${MASTER_RESERVED_IP}"
create-etcd-certs ${MASTER_NAME}
if [[ "${NUM_NODES}" -ge "50" ]]; then
# We block on master creation for large clusters to avoid doing too much
# unnecessary work (such as creating nodes) in case master start-up fails.
create-master-instance "${MASTER_RESERVED_IP}"
else
create-master-instance "${MASTER_RESERVED_IP}" &
fi
}
# Adds master replica to etcd cluster.

View File

@@ -47,3 +47,10 @@ KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false}
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.
METADATA_CLOBBERS_CONFIG=true
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST"

View File

@@ -0,0 +1,40 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "etcd-version-monitor",
library = ":go_default_library",
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["etcd-version-monitor.go"],
tags = ["automanaged"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,39 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "attachlease",
library = ":go_default_library",
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["attachlease.go"],
tags = ["automanaged"],
deps = [
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -122,7 +122,6 @@ start_etcd() {
${ETCD_CMD} \
--name="etcd-$(hostname)" \
--debug \
--force-new-cluster \
--data-dir=${DATA_DIRECTORY} \
--listen-client-urls http://127.0.0.1:${ETCD_PORT} \
--advertise-client-urls http://127.0.0.1:${ETCD_PORT} \
@@ -154,7 +153,7 @@ ROLLBACK="${ROLLBACK:-/usr/local/bin/rollback}"
# If we are upgrading from 2.2.1 and this is the first try for upgrade,
# do the backup to allow restoring from it in case of failed upgrade.
BACKUP_DIR="${DATA_DIRECTORY}/migration-backup"
if [ "${CURRENT_VERSION}" = "2.2.1" -a ! "${CURRENT_VERSION}" != "${TARGET_VERSION}" -a -d "${BACKUP_DIR}" ]; then
if [ "${CURRENT_VERSION}" = "2.2.1" -a "${CURRENT_VERSION}" != "${TARGET_VERSION}" -a ! -d "${BACKUP_DIR}" ]; then
echo "Backup etcd before starting migration"
mkdir ${BACKUP_DIR}
ETCDCTL_CMD="/usr/local/bin/etcdctl-2.2.1"
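The corrected condition reads more clearly when each check gets its own test; an equivalent form of the fixed line (back up only when migrating off 2.2.1 and no backup exists yet):

```sh
# Equivalent to the fixed condition, using && instead of chaining -a
# inside a single test expression.
if [ "${CURRENT_VERSION}" = "2.2.1" ] \
    && [ "${CURRENT_VERSION}" != "${TARGET_VERSION}" ] \
    && [ ! -d "${BACKUP_DIR}" ]; then
  echo "Backup etcd before starting migration"
  mkdir "${BACKUP_DIR}"
fi
```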

View File

@@ -0,0 +1,51 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "rollback",
library = ":go_default_library",
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["rollback.go"],
tags = ["automanaged"],
deps = [
"//third_party/forked/etcd221/wal:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library",
"//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library",
"//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
"//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library",
"//vendor/github.com/coreos/etcd/snap:go_default_library",
"//vendor/github.com/coreos/etcd/store:go_default_library",
"//vendor/github.com/coreos/etcd/wal:go_default_library",
"//vendor/github.com/coreos/etcd/wal/walpb:go_default_library",
"//vendor/github.com/coreos/go-semver/semver:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -173,7 +173,7 @@ function dump_masters() {
echo "Master SSH not supported for ${KUBERNETES_PROVIDER}"
return
else
if ! (detect-master &> /dev/null); then
if ! (detect-master); then
echo "Master not detected. Is the cluster up?"
return
fi

View File

@@ -1,19 +1,19 @@
apiVersion: v1
kind: Pod
metadata:
name: l7-lb-controller-v0.9.5
name: l7-lb-controller-v0.9.6
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: gcp-lb-controller
version: v0.9.5
version: v0.9.6
kubernetes.io/name: "GLBC"
spec:
terminationGracePeriodSeconds: 600
hostNetwork: true
containers:
- image: gcr.io/google_containers/glbc:0.9.5
- image: gcr.io/google_containers/glbc:0.9.6
livenessProbe:
httpGet:
path: /healthz

View File

@@ -106,7 +106,7 @@ func StartTestServer(t *testing.T) (result *restclient.Config, tearDownForCaller
default:
}
result := client.CoreV1Client.RESTClient().Get().AbsPath("/healthz").Do()
result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do()
status := 0
result.StatusCode(&status)
if status == 200 {

View File

@@ -163,7 +163,12 @@ func Run(s *options.CMServer) error {
ClientConfig: kubeconfig,
}
var clientBuilder controller.ControllerClientBuilder
if len(s.ServiceAccountKeyFile) > 0 && s.UseServiceAccountCredentials {
if s.UseServiceAccountCredentials {
if len(s.ServiceAccountKeyFile) == 0 {
// It's possible another controller process is creating the tokens for us.
// If one isn't, we'll timeout and exit when our client builder is unable to create the tokens.
glog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file")
}
clientBuilder = controller.SAControllerClientBuilder{
ClientConfig: restclient.AnonymousClientConfig(kubeconfig),
CoreClient: kubeClient.CoreV1(),
@@ -342,6 +347,7 @@ func NewControllerInitializers() map[string]InitFunc {
func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (map[schema.GroupVersionResource]bool, error) {
var discoveryClient discovery.DiscoveryInterface
var healthzContent string
// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
@@ -352,17 +358,19 @@ func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (ma
}
healthStatus := 0
client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
resp := client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
if healthStatus != http.StatusOK {
glog.Errorf("Server isn't healthy yet. Waiting a little while.")
return false, nil
}
content, _ := resp.Raw()
healthzContent = string(content)
discoveryClient = client.Discovery()
return true, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get api versions from server: %v", err)
return nil, fmt.Errorf("failed to get api versions from server: %v: %v", healthzContent, err)
}
resourceMap, err := discoveryClient.ServerResources()
@@ -464,7 +472,7 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController
glog.Warningf("%q is disabled because there is no private key", saTokenControllerName)
return false, nil
}
privateKey, err := serviceaccount.ReadPrivateKey(ctx.Options.ServiceAccountKeyFile)
privateKey, err := certutil.PrivateKeyFromFile(ctx.Options.ServiceAccountKeyFile)
if err != nil {
return true, fmt.Errorf("error reading key for service account token controller: %v", err)
}
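The new warning fires when service-account credentials are requested without a signing key, since token creation then has to happen in some other controller-manager process. For reference, a typical invocation that provides both flags (paths are illustrative, not required locations):

```sh
# Illustrative flags only; the key and kubeconfig paths are examples.
kube-controller-manager \
  --use-service-account-credentials=true \
  --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
  --kubeconfig=/etc/kubernetes/controller-manager.conf
```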

View File

@@ -122,7 +122,7 @@ type Options struct {
func AddFlags(options *Options, fs *pflag.FlagSet) {
fs.StringVar(&options.ConfigFile, "config", options.ConfigFile, "The path to the configuration file.")
fs.StringVar(&options.WriteConfigTo, "write-config-to", options.WriteConfigTo, "If set, write the default configuration values to this file and exit.")
fs.BoolVar(&options.CleanupAndExit, "cleanup-iptables", options.CleanupAndExit, "If true cleanup iptables rules and exit.")
fs.BoolVar(&options.CleanupAndExit, "cleanup-iptables", options.CleanupAndExit, "If true, cleanup iptables rules and exit.")
// All flags below here are deprecated and will eventually be removed.
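The flag whose help text is touched here acts as a one-shot cleanup mode; a sketch of invoking it:

```sh
# Remove the iptables rules kube-proxy previously installed, then exit.
kube-proxy --cleanup-iptables
```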

View File

@@ -195,6 +195,7 @@ func TestProxyServerWithCleanupAndExit(t *testing.T) {
assert.Nil(t, err, "unexpected error in NewProxyServer, addr: %s", addr)
assert.NotNil(t, proxyserver, "nil proxy server obj, addr: %s", addr)
assert.NotNil(t, proxyserver.IptInterface, "nil iptables intf, addr: %s", addr)
assert.True(t, proxyserver.CleanupAndExit, "false CleanupAndExit, addr: %s", addr)
// Clean up config for next test case
configz.Delete("componentconfig")

View File

@@ -41,7 +41,7 @@ filegroup(
srcs = [
":package-srcs",
"//cmd/kubeadm/app:all-srcs",
"//cmd/kubeadm/test/cmd:all-srcs",
"//cmd/kubeadm/test:all-srcs",
],
tags = ["automanaged"],
)

View File

@@ -17,6 +17,7 @@ limitations under the License.
package kubeadm
import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -38,11 +39,6 @@ type MasterConfiguration struct {
Token string
TokenTTL time.Duration
// SelfHosted enables an alpha deployment type where the apiserver, scheduler, and
// controller manager are managed by Kubernetes itself. This option is likely to
// become the default in the future.
SelfHosted bool
APIServerExtraArgs map[string]string
ControllerManagerExtraArgs map[string]string
SchedulerExtraArgs map[string]string
@@ -105,3 +101,7 @@ type NodeConfiguration struct {
TLSBootstrapToken string
Token string
}
func (cfg *MasterConfiguration) GetMasterEndpoint() string {
return fmt.Sprintf("https://%s:%d", cfg.API.AdvertiseAddress, cfg.API.BindPort)
}

View File

@@ -38,11 +38,6 @@ type MasterConfiguration struct {
Token string `json:"token"`
TokenTTL time.Duration `json:"tokenTTL"`
// SelfHosted enables an alpha deployment type where the apiserver, scheduler, and
// controller manager are managed by Kubernetes itself. This option is likely to
// become the default in the future.
SelfHosted bool `json:"selfHosted"`
APIServerExtraArgs map[string]string `json:"apiServerExtraArgs"`
ControllerManagerExtraArgs map[string]string `json:"controllerManagerExtraArgs"`
SchedulerExtraArgs map[string]string `json:"schedulerExtraArgs"`

View File

@@ -24,6 +24,7 @@ go_library(
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
"//cmd/kubeadm/app/cmd/features:go_default_library",
"//cmd/kubeadm/app/cmd/phases:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/discovery:go_default_library",

View File

@@ -31,6 +31,12 @@ const (
// FeatureList represents a list of feature gates
type FeatureList map[utilfeature.Feature]utilfeature.FeatureSpec
// Enabled indicates whether a feature name has been enabled
func Enabled(featureList map[string]bool, featureName utilfeature.Feature) bool {
_, ok := featureList[string(featureName)]
return ok
}
// Supports indicates whether a feature name is supported on the given
// feature set
func Supports(featureList FeatureList, featureName string) bool {

View File

@@ -31,6 +31,7 @@ import (
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/features"
cmdphases "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
addonsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons"
@@ -147,10 +148,6 @@ func NewCmdInit(out io.Writer) *cobra.Command {
&skipTokenPrint, "skip-token-print", skipTokenPrint,
"Skip printing of the default bootstrap token generated by 'kubeadm init'",
)
cmd.PersistentFlags().BoolVar(
&cfg.SelfHosted, "self-hosted", cfg.SelfHosted,
"[experimental] If kubeadm should make this control plane self-hosted",
)
cmd.PersistentFlags().StringVar(
&cfg.Token, "token", cfg.Token,
@@ -224,27 +221,27 @@ func (i *Init) Validate(cmd *cobra.Command) error {
// Run executes master node provisioning, including certificates, needed static pod manifests, etc.
func (i *Init) Run(out io.Writer) error {
// PHASE 1: Generate certificates
err := cmdphases.CreatePKIAssets(i.cfg)
k8sVersion, err := version.ParseSemantic(i.cfg.KubernetesVersion)
if err != nil {
return fmt.Errorf("couldn't parse kubernetes version %q: %v", i.cfg.KubernetesVersion, err)
}
// PHASE 1: Generate certificates
if err := cmdphases.CreatePKIAssets(i.cfg); err != nil {
return err
}
// PHASE 2: Generate kubeconfig files for the admin and the kubelet
masterEndpoint := fmt.Sprintf("https://%s:%d", i.cfg.API.AdvertiseAddress, i.cfg.API.BindPort)
err = kubeconfigphase.CreateInitKubeConfigFiles(masterEndpoint, i.cfg.CertificatesDir, kubeadmconstants.KubernetesDir, i.cfg.NodeName)
if err != nil {
if err := kubeconfigphase.CreateInitKubeConfigFiles(kubeadmconstants.KubernetesDir, i.cfg); err != nil {
return err
}
// PHASE 3: Bootstrap the control plane
if err := controlplanephase.WriteStaticPodManifests(i.cfg); err != nil {
if err := controlplanephase.WriteStaticPodManifests(i.cfg, k8sVersion, kubeadmconstants.GetStaticPodDirectory()); err != nil {
return err
}
adminKubeConfigPath := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.AdminKubeConfigFileName)
client, err := kubeadmutil.CreateClientAndWaitForAPI(adminKubeConfigPath)
client, err := kubeadmutil.CreateClientAndWaitForAPI(kubeadmconstants.GetAdminKubeConfigPath())
if err != nil {
return err
}
@@ -263,25 +260,18 @@ func (i *Init) Run(out io.Writer) error {
return err
}
if err := tokenphase.CreateBootstrapConfigMapIfNotExists(client, adminKubeConfigPath); err != nil {
if err := tokenphase.CreateBootstrapConfigMapIfNotExists(client, kubeadmconstants.GetAdminKubeConfigPath()); err != nil {
return err
}
// PHASE 5: Install and deploy all addons, and configure things as necessary
k8sVersion, err := version.ParseSemantic(i.cfg.KubernetesVersion)
if err != nil {
return fmt.Errorf("couldn't parse kubernetes version %q: %v", i.cfg.KubernetesVersion, err)
}
// Create the necessary ServiceAccounts
err = apiconfigphase.CreateServiceAccounts(client)
if err != nil {
if err := apiconfigphase.CreateServiceAccounts(client); err != nil {
return err
}
err = apiconfigphase.CreateRBACRules(client, k8sVersion)
if err != nil {
if err := apiconfigphase.CreateRBACRules(client, k8sVersion); err != nil {
return err
}
@@ -290,7 +280,7 @@ func (i *Init) Run(out io.Writer) error {
}
// Is deployment type self-hosted?
if i.cfg.SelfHosted {
if features.Enabled(i.cfg.FeatureFlags, features.SelfHosting) {
// Temporary control plane is up, now we create our self hosted control
// plane components and remove the static manifests:
fmt.Println("[self-hosted] Creating self-hosted control plane...")

View File

@@ -40,15 +40,22 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["certs_test.go"],
srcs = [
"certs_test.go",
"kubeconfig_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/install:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
"//vendor/github.com/renstrom/dedent:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//cmd/kubeadm/test:go_default_library",
"//cmd/kubeadm/test/cmd:go_default_library",
"//cmd/kubeadm/test/kubeconfig:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
],
)

View File

@@ -34,6 +34,7 @@ import (
"k8s.io/kubernetes/pkg/api"
)
// NewCmdCerts return main command for certs phase
func NewCmdCerts() *cobra.Command {
cmd := &cobra.Command{
Use: "certs",
@@ -42,12 +43,12 @@ func NewCmdCerts() *cobra.Command {
RunE: subCmdRunE("certs"),
}
cmd.AddCommand(newSubCmdCerts()...)
cmd.AddCommand(getCertsSubCommands()...)
return cmd
}
// newSubCmdCerts returns sub commands for certs phase
func newSubCmdCerts() []*cobra.Command {
// getCertsSubCommands returns sub commands for certs phase
func getCertsSubCommands() []*cobra.Command {
cfg := &kubeadmapiext.MasterConfiguration{}
// Default values for the cobra help text
@@ -122,13 +123,13 @@ func newSubCmdCerts() []*cobra.Command {
return subCmds
}
// runCmdFunc creates a cobra.Command Run function, by composing the call to the given cmdFunc with necessary additional steps (e.g preparation of inpunt parameters)
// runCmdFunc creates a cobra.Command Run function, by composing the call to the given cmdFunc with necessary additional steps (e.g preparation of input parameters)
func runCmdFunc(cmdFunc func(cfg *kubeadmapi.MasterConfiguration) error, cfgPath *string, cfg *kubeadmapiext.MasterConfiguration) func(cmd *cobra.Command, args []string) {
// the following statement build a clousure that wraps a call to a CreateCertFunc, binding
// the following statement builds a closure that wraps a call to a cmdFunc, binding
// the function itself with the specific parameters of each sub command.
// Please note that specific parameters should be passed by value, while other parameters - passed by reference -
// are shared between sub commnands and gets access to current value e.g. flags value.
// are shared between sub commands and get access to the current value, e.g. flags value.
return func(cmd *cobra.Command, args []string) {
internalcfg := &kubeadmapi.MasterConfiguration{}

View File

@@ -18,26 +18,24 @@ package phases
import (
"fmt"
"html/template"
"io/ioutil"
"os"
"path"
"strings"
"testing"
"github.com/renstrom/dedent"
"github.com/spf13/cobra"
// required for triggering api machinery startup when running unit tests
_ "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
"k8s.io/kubernetes/pkg/util/node"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
cmdtestutil "k8s.io/kubernetes/cmd/kubeadm/test/cmd"
)
func TestSubCmdCertsCreateFiles(t *testing.T) {
subCmds := newSubCmdCerts()
subCmds := getCertsSubCommands()
var tests = []struct {
subCmds []string
@@ -81,71 +79,51 @@ func TestSubCmdCertsCreateFiles(t *testing.T) {
}
for _, test := range tests {
// Temporary folder for the test case
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// executes given sub commands
for _, subCmdName := range test.subCmds {
subCmd := getSubCmd(t, subCmdName, subCmds)
subCmd.SetArgs([]string{fmt.Sprintf("--cert-dir=%s", tmpdir)})
if err := subCmd.Execute(); err != nil {
t.Fatalf("Could not execute subcommand: %s", subCmdName)
}
certDirFlag := fmt.Sprintf("--cert-dir=%s", tmpdir)
cmdtestutil.RunSubCommand(t, subCmds, subCmdName, certDirFlag)
}
// verify expected files are there
assertFilesCount(t, tmpdir, len(test.expectedFiles))
for _, file := range test.expectedFiles {
assertFileExists(t, tmpdir, file)
}
testutil.AssertFileExists(t, tmpdir, test.expectedFiles...)
}
}
func TestSubCmdApiServerFlags(t *testing.T) {
subCmds := newSubCmdCerts()
subCmds := getCertsSubCommands()
// Temporary folder for the test case
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// creates ca cert
subCmd := getSubCmd(t, "ca", subCmds)
subCmd.SetArgs([]string{fmt.Sprintf("--cert-dir=%s", tmpdir)})
if err := subCmd.Execute(); err != nil {
t.Fatalf("Could not execute subcommand ca")
}
certDirFlag := fmt.Sprintf("--cert-dir=%s", tmpdir)
cmdtestutil.RunSubCommand(t, subCmds, "ca", certDirFlag)
// creates apiserver cert
subCmd = getSubCmd(t, "apiserver", subCmds)
subCmd.SetArgs([]string{
apiserverFlags := []string{
fmt.Sprintf("--cert-dir=%s", tmpdir),
"--apiserver-cert-extra-sans=foo,boo",
"--service-cidr=10.0.0.0/24",
"--service-dns-domain=mycluster.local",
"--apiserver-advertise-address=1.2.3.4",
})
if err := subCmd.Execute(); err != nil {
t.Fatalf("Could not execute subcommand apiserver")
}
cmdtestutil.RunSubCommand(t, subCmds, "apiserver", apiserverFlags...)
APIserverCert, err := pkiutil.TryLoadCertFromDisk(tmpdir, kubeadmconstants.APIServerCertAndKeyBaseName)
if err != nil {
t.Fatalf("Error loading API server certificate: %v", err)
}
hostname, err := os.Hostname()
if err != nil {
t.Errorf("couldn't get the hostname: %v", err)
}
for i, name := range []string{strings.ToLower(hostname), "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.mycluster.local"} {
hostname := node.GetHostname("")
for i, name := range []string{hostname, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.mycluster.local"} {
if APIserverCert.DNSNames[i] != name {
t.Errorf("APIserverCert.DNSNames[%d] is %s instead of %s", i, APIserverCert.DNSNames[i], name)
}
@@ -157,9 +135,9 @@ func TestSubCmdApiServerFlags(t *testing.T) {
}
}
func TestSubCmdReadsConfig(t *testing.T) {
func TestSubCmdCertsReadsConfig(t *testing.T) {
subCmds := newSubCmdCerts()
subCmds := getCertsSubCommands()
var tests = []struct {
subCmds []string
@@ -184,88 +162,26 @@ func TestSubCmdReadsConfig(t *testing.T) {
}
for _, test := range tests {
// Temporary folder for the test case
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
configPath := saveDummyCfg(t, tmpdir)
certdir := tmpdir
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
CertificatesDir: certdir,
NodeName: "valid-node-name",
}
configPath := testutil.SetupMasterConfigurationFile(t, tmpdir, cfg)
// executes given sub commands
for _, subCmdName := range test.subCmds {
subCmd := getSubCmd(t, subCmdName, subCmds)
subCmd.SetArgs([]string{fmt.Sprintf("--config=%s", configPath)})
if err := subCmd.Execute(); err != nil {
t.Fatalf("Could not execute command: %s", subCmdName)
}
configFlag := fmt.Sprintf("--config=%s", configPath)
cmdtestutil.RunSubCommand(t, subCmds, subCmdName, configFlag)
}
// verify expected files are there
// NB. test.expectedFileCount + 1 because in this test case the tempdir where key/certificates
// are saved contains also the dummy configuration file
assertFilesCount(t, tmpdir, test.expectedFileCount+1)
testutil.AssertFilesCount(t, tmpdir, test.expectedFileCount)
}
}
func getSubCmd(t *testing.T, name string, subCmds []*cobra.Command) *cobra.Command {
for _, subCmd := range subCmds {
if subCmd.Name() == name {
return subCmd
}
}
t.Fatalf("Unable to find sub command %s", name)
return nil
}
func assertFilesCount(t *testing.T, dirName string, count int) {
files, err := ioutil.ReadDir(dirName)
if err != nil {
t.Fatalf("Couldn't read files from tmpdir: %s", err)
}
if len(files) != count {
t.Errorf("dir does contains %d, %d expected", len(files), count)
for _, f := range files {
t.Error(f.Name())
}
}
}
func assertFileExists(t *testing.T, dirName string, fileName string) {
path := path.Join(dirName, fileName)
if _, err := os.Stat(path); os.IsNotExist(err) {
t.Errorf("file %s does not exist", fileName)
}
}
func saveDummyCfg(t *testing.T, dirName string) string {
path := path.Join(dirName, "dummyconfig.yaml")
cfgTemplate := template.Must(template.New("init").Parse(dedent.Dedent(`
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
certificatesDir: {{.CertificatesDir}}
`)))
f, err := os.Create(path)
if err != nil {
t.Errorf("error creating dummyconfig file %s: %v", path, err)
}
templateData := struct {
CertificatesDir string
}{
CertificatesDir: dirName,
}
err = cfgTemplate.Execute(f, templateData)
if err != nil {
t.Errorf("error generating dummyconfig file %s: %v", path, err)
}
f.Close()
return path
}

View File

@@ -22,98 +22,147 @@ import (
"github.com/spf13/cobra"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
"k8s.io/kubernetes/pkg/api"
)
// NewCmdKubeConfig return main command for kubeconfig phase
func NewCmdKubeConfig(out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "kubeconfig",
Short: "Create KubeConfig files from given credentials.",
Short: "Generate all kubeconfig files necessary to establish the control plane and the admin kubeconfig file.",
RunE: subCmdRunE("kubeconfig"),
}
cmd.AddCommand(NewCmdToken(out))
cmd.AddCommand(NewCmdClientCerts(out))
cmd.AddCommand(getKubeConfigSubCommands(out, kubeadmconstants.KubernetesDir)...)
return cmd
}
func NewCmdToken(out io.Writer) *cobra.Command {
config := &kubeconfigphase.BuildConfigProperties{
MakeClientCerts: false,
// getKubeConfigSubCommands returns sub commands for kubeconfig phase
func getKubeConfigSubCommands(out io.Writer, outDir string) []*cobra.Command {
cfg := &kubeadmapiext.MasterConfiguration{}
// Default values for the cobra help text
api.Scheme.Default(cfg)
var cfgPath, token, clientName string
var subCmds []*cobra.Command
subCmdProperties := []struct {
use string
short string
cmdFunc func(outDir string, cfg *kubeadmapi.MasterConfiguration) error
}{
{
use: "all",
short: "Generate all kubeconfig files necessary to establish the control plane and the admin kubeconfig file.",
cmdFunc: kubeconfigphase.CreateInitKubeConfigFiles,
},
{
use: "admin",
short: "Generate a kubeconfig file for the admin to use and for kubeadm itself.",
cmdFunc: kubeconfigphase.CreateAdminKubeConfigFile,
},
{
use: "kubelet",
short: "Generate a kubeconfig file for the Kubelet to use. Please note that this should *only* be used for bootstrapping purposes. After your control plane is up, you should request all kubelet credentials from the CSR API.",
cmdFunc: kubeconfigphase.CreateKubeletKubeConfigFile,
},
{
use: "controller-manager",
short: "Generate a kubeconfig file for the Controller Manager to use.",
cmdFunc: kubeconfigphase.CreateControllerManagerKubeConfigFile,
},
{
use: "scheduler",
short: "Generate a kubeconfig file for the Scheduler to use.",
cmdFunc: kubeconfigphase.CreateSchedulerKubeConfigFile,
},
{
use: "user",
short: "Outputs a kubeconfig file for an additional user.",
cmdFunc: func(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
if clientName == "" {
return fmt.Errorf("missing required argument client-name")
}
cmd := &cobra.Command{
Use: "token",
Short: "Output a valid KubeConfig file to STDOUT with a token as the authentication method.",
Run: func(cmd *cobra.Command, args []string) {
err := RunCreateWithToken(out, config)
kubeadmutil.CheckErr(err)
// if the kubeconfig file for an additional user has to use a token, use it
if token != "" {
return kubeconfigphase.WriteKubeConfigWithToken(out, cfg, clientName, token)
}
// Otherwise, write a kubeconfig file with a generated client cert
return kubeconfigphase.WriteKubeConfigWithClientCert(out, cfg, clientName)
},
},
}
addCommonFlags(cmd, config)
cmd.Flags().StringVar(&config.Token, "token", "", "The path to the directory where the certificates are.")
return cmd
}
func NewCmdClientCerts(out io.Writer) *cobra.Command {
config := &kubeconfigphase.BuildConfigProperties{
MakeClientCerts: true,
}
for _, properties := range subCmdProperties {
// Creates the UX Command
cmd := &cobra.Command{
Use: "client-certs",
Short: "Output a valid KubeConfig file to STDOUT with a client certificates as the authentication method.",
Run: func(cmd *cobra.Command, args []string) {
err := RunCreateWithClientCerts(out, config)
Use: properties.use,
Short: properties.short,
Run: runCmdFuncKubeConfig(properties.cmdFunc, &outDir, &cfgPath, cfg),
}
// Add flags to the command
if properties.use != "user" {
cmd.Flags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)")
}
cmd.Flags().StringVar(&cfg.CertificatesDir, "cert-dir", cfg.CertificatesDir, "The path where to save and store the certificates")
cmd.Flags().StringVar(&cfg.API.AdvertiseAddress, "apiserver-advertise-address", cfg.API.AdvertiseAddress, "The IP address the API Server will advertise it's listening on. 0.0.0.0 means the default network interface's address.")
cmd.Flags().Int32Var(&cfg.API.BindPort, "apiserver-bind-port", cfg.API.BindPort, "Port for the API Server to bind to")
if properties.use == "all" || properties.use == "kubelet" {
cmd.Flags().StringVar(&cfg.NodeName, "node-name", cfg.NodeName, `Specify the node name`)
}
if properties.use == "user" {
cmd.Flags().StringVar(&token, "token", token, "The token that should be used as the authentication mechanism for this kubeconfig file, instead of client certificates.")
cmd.Flags().StringVar(&clientName, "client-name", clientName, "The name of the client for which the KubeConfig file will be generated.")
}
subCmds = append(subCmds, cmd)
}
return subCmds
}
// runCmdFuncKubeConfig creates a cobra.Command Run function, by composing the call to the given cmdFunc with necessary additional steps (e.g preparation of input parameters)
func runCmdFuncKubeConfig(cmdFunc func(outDir string, cfg *kubeadmapi.MasterConfiguration) error, outDir, cfgPath *string, cfg *kubeadmapiext.MasterConfiguration) func(cmd *cobra.Command, args []string) {
// the following statement builds a closure that wraps a call to a CreateKubeConfigFunc, binding
// the function itself with the specific parameters of each sub command.
// Please note that specific parameters should be passed by value, while other parameters - passed by reference -
// are shared between sub commands and get access to the current value, e.g. flags value.
return func(cmd *cobra.Command, args []string) {
internalcfg := &kubeadmapi.MasterConfiguration{}
// Takes passed flags into account; the defaulting is executed once again, enforcing assignment of
// static default values to cfg only for values not provided with flags
api.Scheme.Default(cfg)
api.Scheme.Convert(cfg, internalcfg, nil)
// Loads configuration from config file, if provided
// Nb. --config overrides command line flags
err := configutil.TryLoadMasterConfiguration(*cfgPath, internalcfg)
kubeadmutil.CheckErr(err)
},
}
addCommonFlags(cmd, config)
cmd.Flags().StringSliceVar(&config.Organization, "organization", []string{}, "The organization (group) the certificate should be in.")
return cmd
}
func addCommonFlags(cmd *cobra.Command, config *kubeconfigphase.BuildConfigProperties) {
cmd.Flags().StringVar(&config.CertDir, "cert-dir", kubeadmapiext.DefaultCertificatesDir, "The path to the directory where the certificates are.")
cmd.Flags().StringVar(&config.ClientName, "client-name", "", "The name of the client for which the KubeConfig file will be generated.")
cmd.Flags().StringVar(&config.APIServer, "server", "", "The location of the api server.")
}
// Applies dynamic defaults to settings not provided with flags
err = configutil.SetInitDynamicDefaults(internalcfg)
kubeadmutil.CheckErr(err)
func validateCommonFlags(config *kubeconfigphase.BuildConfigProperties) error {
if len(config.ClientName) == 0 {
return fmt.Errorf("The --client-name flag is required")
}
if len(config.APIServer) == 0 {
return fmt.Errorf("The --server flag is required")
}
return nil
}
// Validates cfg (flags/configs + defaults + dynamic defaults)
err = validation.ValidateMasterConfiguration(internalcfg).ToAggregate()
kubeadmutil.CheckErr(err)
// RunCreateWithToken generates a kubeconfig file with a token as the authentication mechanism
func RunCreateWithToken(out io.Writer, config *kubeconfigphase.BuildConfigProperties) error {
if len(config.Token) == 0 {
return fmt.Errorf("The --token flag is required")
// Execute the cmdFunc
err = cmdFunc(*outDir, internalcfg)
kubeadmutil.CheckErr(err)
}
if err := validateCommonFlags(config); err != nil {
return err
}
kubeConfigBytes, err := kubeconfigphase.GetKubeConfigBytesFromSpec(*config)
if err != nil {
return err
}
fmt.Fprintln(out, string(kubeConfigBytes))
return nil
}
// RunCreateWithClientCerts generates a kubeconfig file with client certs as the authentication mechanism
func RunCreateWithClientCerts(out io.Writer, config *kubeconfigphase.BuildConfigProperties) error {
if err := validateCommonFlags(config); err != nil {
return err
}
kubeConfigBytes, err := kubeconfigphase.GetKubeConfigBytesFromSpec(*config)
if err != nil {
return err
}
fmt.Fprintln(out, string(kubeConfigBytes))
return nil
}
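The comment on runCmdFuncKubeConfig above describes building a closure that binds a CreateKubeConfigFunc to shared flag pointers, so flag values are only dereferenced when the command actually runs. Below is a minimal, self-contained sketch of that pattern; the names newRunFunc and outDir are illustrative and not part of this diff.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// newRunFunc returns a cobra Run function that closes over outDir.
// outDir is passed by reference, so the value parsed from the flag at
// execution time is the one the wrapped function actually sees.
func newRunFunc(do func(outDir string) error, outDir *string) func(*cobra.Command, []string) {
	return func(cmd *cobra.Command, args []string) {
		if err := do(*outDir); err != nil {
			fmt.Fprintln(cmd.OutOrStderr(), err)
		}
	}
}

func main() {
	outDir := "/etc/kubernetes"
	cmd := &cobra.Command{
		Use: "demo",
		Run: newRunFunc(func(dir string) error {
			fmt.Println("would write kubeconfig files to", dir)
			return nil
		}, &outDir),
	}
	// The flag mutates outDir; the closure picks up the final value at Run time.
	cmd.Flags().StringVar(&outDir, "kubeconfig-dir", outDir, "directory to write kubeconfig files to")
	cmd.Execute()
}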

View File

@@ -0,0 +1,383 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package phases
import (
"bytes"
"fmt"
"os"
"path/filepath"
"testing"
// required for triggering api machinery startup when running unit tests
_ "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install"
"k8s.io/client-go/tools/clientcmd"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
cmdtestutil "k8s.io/kubernetes/cmd/kubeadm/test/cmd"
kubeconfigtestutil "k8s.io/kubernetes/cmd/kubeadm/test/kubeconfig"
)
func TestKubeConfigCSubCommandsHasFlags(t *testing.T) {
subCmds := getKubeConfigSubCommands(nil, "")
commonFlags := []string{
"cert-dir",
"apiserver-advertise-address",
"apiserver-bind-port",
}
var tests = []struct {
command string
additionalFlags []string
}{
{
command: "all",
additionalFlags: []string{
"config",
"node-name",
},
},
{
command: "admin",
additionalFlags: []string{
"config",
},
},
{
command: "kubelet",
additionalFlags: []string{
"config",
"node-name",
},
},
{
command: "controller-manager",
additionalFlags: []string{
"config",
},
},
{
command: "scheduler",
additionalFlags: []string{
"config",
},
},
{
command: "user",
additionalFlags: []string{
"token",
"client-name",
},
},
}
for _, test := range tests {
expectedFlags := append(commonFlags, test.additionalFlags...)
cmdtestutil.AssertSubCommandHasFlags(t, subCmds, test.command, expectedFlags...)
}
}
func TestKubeConfigSubCommandsThatCreateFilesWithFlags(t *testing.T) {
commonFlags := []string{
"--apiserver-advertise-address=1.2.3.4",
"--apiserver-bind-port=1234",
}
var tests = []struct {
command string
additionalFlags []string
expectedFiles []string
}{
{
command: "all",
additionalFlags: []string{"--node-name=valid-nome-name"},
expectedFiles: []string{
kubeadmconstants.AdminKubeConfigFileName,
kubeadmconstants.KubeletKubeConfigFileName,
kubeadmconstants.ControllerManagerKubeConfigFileName,
kubeadmconstants.SchedulerKubeConfigFileName,
},
},
{
command: "admin",
expectedFiles: []string{kubeadmconstants.AdminKubeConfigFileName},
},
{
command: "kubelet",
additionalFlags: []string{"--node-name=valid-nome-name"},
expectedFiles: []string{kubeadmconstants.KubeletKubeConfigFileName},
},
{
command: "controller-manager",
expectedFiles: []string{kubeadmconstants.ControllerManagerKubeConfigFileName},
},
{
command: "scheduler",
expectedFiles: []string{kubeadmconstants.SchedulerKubeConfigFileName},
},
}
var kubeConfigAssertions = map[string]struct {
clientName string
organizations []string
}{
kubeadmconstants.AdminKubeConfigFileName: {
clientName: "kubernetes-admin",
organizations: []string{kubeadmconstants.MastersGroup},
},
kubeadmconstants.KubeletKubeConfigFileName: {
clientName: "system:node:valid-nome-name",
organizations: []string{kubeadmconstants.NodesGroup},
},
kubeadmconstants.ControllerManagerKubeConfigFileName: {
clientName: kubeadmconstants.ControllerManagerUser,
},
kubeadmconstants.SchedulerKubeConfigFileName: {
clientName: kubeadmconstants.SchedulerUser,
},
}
for _, test := range tests {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Adds a pki folder with a CA cert to the temp folder
pkidir := testutil.SetupPkiDirWithCertificateAuthorithy(t, tmpdir)
// Retrieves the CA cert for assertions
caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkidir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
t.Fatalf("couldn't retrive ca cert: %v", err)
}
// Get subcommands working in the temporary directory
subCmds := getKubeConfigSubCommands(nil, tmpdir)
// Execute the subcommand
certDirFlag := fmt.Sprintf("--cert-dir=%s", pkidir)
allFlags := append(commonFlags, certDirFlag)
allFlags = append(allFlags, test.additionalFlags...)
cmdtestutil.RunSubCommand(t, subCmds, test.command, allFlags...)
// Checks that requested files are there
testutil.AssertFileExists(t, tmpdir, test.expectedFiles...)
// Checks contents of generated files
for _, file := range test.expectedFiles {
// reads generated files
config, err := clientcmd.LoadFromFile(filepath.Join(tmpdir, file))
if err != nil {
t.Errorf("Couldn't load generated kubeconfig file: %v", err)
}
// checks that CLI flags are properly propagated and kubeconfig properties are correct
kubeconfigtestutil.AssertKubeConfigCurrentCluster(t, config, "https://1.2.3.4:1234", caCert)
expectedClientName := kubeConfigAssertions[file].clientName
expectedOrganizations := kubeConfigAssertions[file].organizations
kubeconfigtestutil.AssertKubeConfigCurrentAuthInfoWithClientCert(t, config, caCert, expectedClientName, expectedOrganizations...)
}
}
}
func TestKubeConfigSubCommandsThatCreateFilesWithConfigFile(t *testing.T) {
var tests = []struct {
command string
expectedFiles []string
}{
{
command: "all",
expectedFiles: []string{
kubeadmconstants.AdminKubeConfigFileName,
kubeadmconstants.KubeletKubeConfigFileName,
kubeadmconstants.ControllerManagerKubeConfigFileName,
kubeadmconstants.SchedulerKubeConfigFileName,
},
},
{
command: "admin",
expectedFiles: []string{kubeadmconstants.AdminKubeConfigFileName},
},
{
command: "kubelet",
expectedFiles: []string{kubeadmconstants.KubeletKubeConfigFileName},
},
{
command: "controller-manager",
expectedFiles: []string{kubeadmconstants.ControllerManagerKubeConfigFileName},
},
{
command: "scheduler",
expectedFiles: []string{kubeadmconstants.SchedulerKubeConfigFileName},
},
}
var kubeConfigAssertions = map[string]struct {
clientName string
organizations []string
}{
kubeadmconstants.AdminKubeConfigFileName: {
clientName: "kubernetes-admin",
organizations: []string{kubeadmconstants.MastersGroup},
},
kubeadmconstants.KubeletKubeConfigFileName: {
clientName: "system:node:valid-node-name",
organizations: []string{kubeadmconstants.NodesGroup},
},
kubeadmconstants.ControllerManagerKubeConfigFileName: {
clientName: kubeadmconstants.ControllerManagerUser,
},
kubeadmconstants.SchedulerKubeConfigFileName: {
clientName: kubeadmconstants.SchedulerUser,
},
}
for _, test := range tests {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Adds a pki folder with a CA cert to the temp folder
pkidir := testutil.SetupPkiDirWithCertificateAuthorithy(t, tmpdir)
// Retrieves the CA cert for assertions
caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkidir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
t.Fatalf("couldn't retrive ca cert: %v", err)
}
// Adds a master configuration file
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
CertificatesDir: pkidir,
NodeName: "valid-node-name",
}
cfgPath := testutil.SetupMasterConfigurationFile(t, tmpdir, cfg)
// Get subcommands working in the temporary directory
subCmds := getKubeConfigSubCommands(nil, tmpdir)
// Execute the subcommand
configFlag := fmt.Sprintf("--config=%s", cfgPath)
cmdtestutil.RunSubCommand(t, subCmds, test.command, configFlag)
// Checks that requested files are there
testutil.AssertFileExists(t, tmpdir, test.expectedFiles...)
// Checks contents of generated files
for _, file := range test.expectedFiles {
// reads generated files
config, err := clientcmd.LoadFromFile(filepath.Join(tmpdir, file))
if err != nil {
t.Errorf("Couldn't load generated kubeconfig file: %v", err)
}
// checks that config file properties are properly propagated and kubeconfig properties are correct
kubeconfigtestutil.AssertKubeConfigCurrentCluster(t, config, "https://1.2.3.4:1234", caCert)
expectedClientName := kubeConfigAssertions[file].clientName
expectedOrganizations := kubeConfigAssertions[file].organizations
kubeconfigtestutil.AssertKubeConfigCurrentAuthInfoWithClientCert(t, config, caCert, expectedClientName, expectedOrganizations...)
}
}
}
func TestKubeConfigSubCommandsThatWritesToOut(t *testing.T) {
// Temporary folders for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Adds a pki folder with a CA cert to the temp folder
pkidir := testutil.SetupPkiDirWithCertificateAuthorithy(t, tmpdir)
// Retrieves the CA cert for assertions
caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkidir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
t.Fatalf("couldn't retrive ca cert: %v", err)
}
commonFlags := []string{
"--apiserver-advertise-address=1.2.3.4",
"--apiserver-bind-port=1234",
"--client-name=myUser",
fmt.Sprintf("--cert-dir=%s", pkidir),
}
var tests = []struct {
command string
withClientCert bool
withToken bool
additionalFlags []string
}{
{ // Test user subCommand withClientCert
command: "user",
withClientCert: true,
},
{ // Test user subCommand withToken
withToken: true,
command: "user",
additionalFlags: []string{"--token=123456"},
},
}
for _, test := range tests {
buf := new(bytes.Buffer)
// Get subcommands working in the temporary directory
subCmds := getKubeConfigSubCommands(buf, tmpdir)
// Execute the subcommand
allFlags := append(commonFlags, test.additionalFlags...)
cmdtestutil.RunSubCommand(t, subCmds, test.command, allFlags...)
// reads kubeconfig written to stdout
config, err := clientcmd.Load(buf.Bytes())
if err != nil {
t.Errorf("Couldn't read kubeconfig file from buffer: %v", err)
continue
}
// checks that CLI flags are properly propagated
kubeconfigtestutil.AssertKubeConfigCurrentCluster(t, config, "https://1.2.3.4:1234", caCert)
if test.withClientCert {
// checks that kubeconfig files have expected client cert
kubeconfigtestutil.AssertKubeConfigCurrentAuthInfoWithClientCert(t, config, caCert, "myUser")
}
if test.withToken {
// checks that kubeconfig files have expected token
kubeconfigtestutil.AssertKubeConfigCurrentAuthInfoWithToken(t, config, "myUser", "123456")
}
}
}

View File

@@ -193,7 +193,7 @@ func NewCmdTokenGenerate(out io.Writer) *cobra.Command {
}
// RunCreateToken generates a new bootstrap token and stores it as a secret on the server.
func RunCreateToken(out io.Writer, client *clientset.Clientset, token string, tokenDuration time.Duration, usages []string, description string) error {
func RunCreateToken(out io.Writer, client clientset.Interface, token string, tokenDuration time.Duration, usages []string, description string) error {
if len(token) == 0 {
var err error
@@ -230,7 +230,7 @@ func RunGenerateToken(out io.Writer) error {
}
// RunListTokens lists details on all existing bootstrap tokens on the server.
func RunListTokens(out io.Writer, errW io.Writer, client *clientset.Clientset) error {
func RunListTokens(out io.Writer, errW io.Writer, client clientset.Interface) error {
// First, build our selector for bootstrap tokens only
tokenSelector := fields.SelectorFromSet(
map[string]string{
@@ -312,7 +312,7 @@ func RunListTokens(out io.Writer, errW io.Writer, client *clientset.Clientset) e
}
// RunDeleteToken removes a bootstrap token from the server.
func RunDeleteToken(out io.Writer, client *clientset.Clientset, tokenIdOrToken string) error {
func RunDeleteToken(out io.Writer, client clientset.Interface, tokenIdOrToken string) error {
// Assume the given first argument is a token id and try to parse it
tokenId := tokenIdOrToken
if err := tokenutil.ParseTokenID(tokenIdOrToken); err != nil {
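The signature changes in this file (and in several hunks below) replace the concrete *clientset.Clientset with the clientset.Interface abstraction, which lets unit tests substitute an in-memory fake client. A rough sketch of why that matters, assuming the client-go fake package and the call signatures of this era; countTokenSecrets is an illustrative name, not a function from this diff.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// countTokenSecrets depends only on clientset.Interface, so it accepts a real
// clientset in production and the in-memory fake in unit tests.
func countTokenSecrets(client clientset.Interface, namespace string) (int, error) {
	secrets, err := client.CoreV1().Secrets(namespace).List(metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(secrets.Items), nil
}

func main() {
	client := fake.NewSimpleClientset() // no API server required
	n, err := countTokenSecrets(client, "kube-system")
	fmt.Println(n, err)
}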

View File

@@ -5,6 +5,7 @@ licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
@@ -29,3 +30,10 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["constants_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
)

View File

@@ -17,6 +17,7 @@ limitations under the License.
package constants
import (
"fmt"
"path/filepath"
"time"
@@ -97,6 +98,18 @@ const (
// MinExternalEtcdVersion indicates minimum external etcd version which kubeadm supports
MinExternalEtcdVersion = "3.0.14"
// DefaultEtcdVersion indicates the default etcd version that kubeadm uses
DefaultEtcdVersion = "3.0.17"
Etcd = "etcd"
KubeAPIServer = "kube-apiserver"
KubeControllerManager = "kube-controller-manager"
KubeScheduler = "kube-scheduler"
KubeProxy = "kube-proxy"
// SelfHostingPrefix is the prefix added to the names of workloads that are self-hosted by kubeadm
SelfHostingPrefix = "self-hosted-"
)
var (
@@ -119,11 +132,29 @@ var (
// DefaultTokenUsages specifies the default functions a token will get
DefaultTokenUsages = []string{"signing", "authentication"}
// MasterComponents defines the master component names
MasterComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler}
// MinimumControlPlaneVersion specifies the minimum control plane version kubeadm can deploy
MinimumControlPlaneVersion = version.MustParseSemantic("v1.7.0")
)
// BuildStaticManifestFilepath returns the location on the disk where the Static Pod should be present
func BuildStaticManifestFilepath(componentName string) string {
return filepath.Join(KubernetesDir, ManifestsSubDirName, componentName+".yaml")
// GetStaticPodDirectory returns the location on the disk where the Static Pod should be present
func GetStaticPodDirectory() string {
return filepath.Join(KubernetesDir, ManifestsSubDirName)
}
// GetStaticPodFilepath returns the location on the disk where the Static Pod should be present
func GetStaticPodFilepath(componentName, manifestsDir string) string {
return filepath.Join(manifestsDir, componentName+".yaml")
}
// GetAdminKubeConfigPath returns the location on the disk where admin kubeconfig is located by default
func GetAdminKubeConfigPath() string {
return filepath.Join(KubernetesDir, AdminKubeConfigFileName)
}
// AddSelfHostedPrefix adds the self-hosted- prefix to the component name
func AddSelfHostedPrefix(componentName string) string {
return fmt.Sprintf("%s%s", SelfHostingPrefix, componentName)
}

View File

@@ -0,0 +1,112 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package constants
import (
"testing"
)
func TestGetStaticPodDirectory(t *testing.T) {
expected := "/etc/kubernetes/manifests"
actual := GetStaticPodDirectory()
if actual != expected {
t.Errorf(
"failed GetStaticPodDirectory:\n\texpected: %s\n\t actual: %s",
expected,
actual,
)
}
}
func TestGetAdminKubeConfigPath(t *testing.T) {
expected := "/etc/kubernetes/admin.conf"
actual := GetAdminKubeConfigPath()
if actual != expected {
t.Errorf(
"failed GetAdminKubeConfigPath:\n\texpected: %s\n\t actual: %s",
expected,
actual,
)
}
}
func TestGetStaticPodFilepath(t *testing.T) {
var tests = []struct {
componentName, manifestsDir, expected string
}{
{
componentName: "kube-apiserver",
manifestsDir: "/etc/kubernetes/manifests",
expected: "/etc/kubernetes/manifests/kube-apiserver.yaml",
},
{
componentName: "kube-controller-manager",
manifestsDir: "/etc/kubernetes/manifests/",
expected: "/etc/kubernetes/manifests/kube-controller-manager.yaml",
},
{
componentName: "foo",
manifestsDir: "/etc/bar/",
expected: "/etc/bar/foo.yaml",
},
}
for _, rt := range tests {
actual := GetStaticPodFilepath(rt.componentName, rt.manifestsDir)
if actual != rt.expected {
t.Errorf(
"failed GetStaticPodFilepath:\n\texpected: %s\n\t actual: %s",
rt.expected,
actual,
)
}
}
}
func TestAddSelfHostedPrefix(t *testing.T) {
var tests = []struct {
componentName, expected string
}{
{
componentName: "kube-apiserver",
expected: "self-hosted-kube-apiserver",
},
{
componentName: "kube-controller-manager",
expected: "self-hosted-kube-controller-manager",
},
{
componentName: "kube-scheduler",
expected: "self-hosted-kube-scheduler",
},
{
componentName: "foo",
expected: "self-hosted-foo",
},
}
for _, rt := range tests {
actual := AddSelfHostedPrefix(rt.componentName)
if actual != rt.expected {
t.Errorf(
"failed AddSelfHostedPrefix:\n\texpected: %s\n\t actual: %s",
rt.expected,
actual,
)
}
}
}

View File

@@ -64,7 +64,7 @@ func GetValidatedClusterInfoObject(cfg *kubeadmapi.NodeConfiguration) (*clientcm
}
}
// isHTTPSURL checks whether the string is parsable as an URL
// isHTTPSURL checks whether the string is parsable as an URL and whether the Scheme is https
func isHTTPSURL(s string) bool {
u, err := url.Parse(s)
return err == nil && u.Scheme == "https"
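For readers skimming the updated comment, a standalone illustration of the documented behaviour; the helper is copied here only so the example runs on its own.

package main

import (
	"fmt"
	"net/url"
)

// isHTTPSURL mirrors the helper above: the string must parse as a URL and
// its scheme must be exactly "https".
func isHTTPSURL(s string) bool {
	u, err := url.Parse(s)
	return err == nil && u.Scheme == "https"
}

func main() {
	fmt.Println(isHTTPSURL("https://10.0.0.1:6443/cluster-info")) // true
	fmt.Println(isHTTPSURL("http://10.0.0.1:6443"))               // false: wrong scheme
	fmt.Println(isHTTPSURL("example.com"))                        // false: no scheme at all
}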

View File

@@ -13,7 +13,7 @@ go_library(
srcs = ["images.go"],
tags = ["automanaged"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
],
)
@@ -23,7 +23,7 @@ go_test(
srcs = ["images_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = ["//cmd/kubeadm/app/apis/kubeadm:go_default_library"],
deps = ["//cmd/kubeadm/app/constants:go_default_library"],
)
filegroup(

View File

@@ -20,29 +20,19 @@ import (
"fmt"
"runtime"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)
const (
KubeEtcdImage = "etcd"
KubeAPIServerImage = "apiserver"
KubeControllerManagerImage = "controller-manager"
KubeSchedulerImage = "scheduler"
etcdVersion = "3.0.17"
)
func GetCoreImage(image string, cfg *kubeadmapi.MasterConfiguration, overrideImage string) string {
func GetCoreImage(image, repoPrefix, k8sVersion, overrideImage string) string {
if overrideImage != "" {
return overrideImage
}
repoPrefix := cfg.ImageRepository
kubernetesImageTag := kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion)
kubernetesImageTag := kubeadmutil.KubernetesVersionToImageTag(k8sVersion)
return map[string]string{
KubeEtcdImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "etcd", runtime.GOARCH, etcdVersion),
KubeAPIServerImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-apiserver", runtime.GOARCH, kubernetesImageTag),
KubeControllerManagerImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-controller-manager", runtime.GOARCH, kubernetesImageTag),
KubeSchedulerImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-scheduler", runtime.GOARCH, kubernetesImageTag),
constants.Etcd: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "etcd", runtime.GOARCH, constants.DefaultEtcdVersion),
constants.KubeAPIServer: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-apiserver", runtime.GOARCH, kubernetesImageTag),
constants.KubeControllerManager: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-controller-manager", runtime.GOARCH, kubernetesImageTag),
constants.KubeScheduler: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-scheduler", runtime.GOARCH, kubernetesImageTag),
}[image]
}
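A small usage sketch of the refactored GetCoreImage signature shown above; the repository prefix and version are illustrative values, not defaults taken from this diff.

package main

import (
	"fmt"

	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
	"k8s.io/kubernetes/cmd/kubeadm/app/images"
)

func main() {
	// With an empty override, the image name computed for the current GOARCH is returned,
	// e.g. gcr.io/google_containers/kube-apiserver-amd64:v1.7.3 on amd64.
	fmt.Println(images.GetCoreImage(constants.KubeAPIServer, "gcr.io/google_containers", "v1.7.3", ""))

	// A non-empty override short-circuits the lookup entirely.
	fmt.Println(images.GetCoreImage(constants.KubeAPIServer, "", "", "example.com/custom-apiserver:dev"))
}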

View File

@@ -21,15 +21,9 @@ import (
"runtime"
"testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
type getCoreImageTest struct {
i string
c *kubeadmapi.MasterConfiguration
o string
}
const (
testversion = "v10.1.2-alpha.1.100+0123456789abcdef+SOMETHING"
expected = "v10.1.2-alpha.1.100_0123456789abcdef_SOMETHING"
@@ -37,38 +31,43 @@ const (
)
func TestGetCoreImage(t *testing.T) {
var imageTest = []struct {
t getCoreImageTest
expected string
var tests = []struct {
image, repo, version, override, expected string
}{
{getCoreImageTest{o: "override"}, "override"},
{getCoreImageTest{
i: KubeEtcdImage,
c: &kubeadmapi.MasterConfiguration{ImageRepository: gcrPrefix}},
fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "etcd", runtime.GOARCH, etcdVersion),
{
override: "override",
expected: "override",
},
{getCoreImageTest{
i: KubeAPIServerImage,
c: &kubeadmapi.MasterConfiguration{ImageRepository: gcrPrefix, KubernetesVersion: testversion}},
fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-apiserver", runtime.GOARCH, expected),
{
image: constants.Etcd,
repo: gcrPrefix,
expected: fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "etcd", runtime.GOARCH, constants.DefaultEtcdVersion),
},
{getCoreImageTest{
i: KubeControllerManagerImage,
c: &kubeadmapi.MasterConfiguration{ImageRepository: gcrPrefix, KubernetesVersion: testversion}},
fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-controller-manager", runtime.GOARCH, expected),
{
image: constants.KubeAPIServer,
repo: gcrPrefix,
version: testversion,
expected: fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-apiserver", runtime.GOARCH, expected),
},
{getCoreImageTest{
i: KubeSchedulerImage,
c: &kubeadmapi.MasterConfiguration{ImageRepository: gcrPrefix, KubernetesVersion: testversion}},
fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-scheduler", runtime.GOARCH, expected),
{
image: constants.KubeControllerManager,
repo: gcrPrefix,
version: testversion,
expected: fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-controller-manager", runtime.GOARCH, expected),
},
{
image: constants.KubeScheduler,
repo: gcrPrefix,
version: testversion,
expected: fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-scheduler", runtime.GOARCH, expected),
},
}
for _, it := range imageTest {
actual := GetCoreImage(it.t.i, it.t.c, it.t.o)
if actual != it.expected {
for _, rt := range tests {
actual := GetCoreImage(rt.image, rt.repo, rt.version, rt.override)
if actual != rt.expected {
t.Errorf(
"failed GetCoreImage:\n\texpected: %s\n\t actual: %s",
it.expected,
rt.expected,
actual,
)
}

View File

@@ -24,15 +24,15 @@ import (
)
// ValidateAPIServer makes sure the server we're connecting to supports the Beta Certificates API
func ValidateAPIServer(client *clientset.Clientset) error {
version, err := client.DiscoveryClient.ServerVersion()
func ValidateAPIServer(client clientset.Interface) error {
version, err := client.Discovery().ServerVersion()
if err != nil {
return fmt.Errorf("failed to check server version: %v", err)
}
fmt.Printf("[bootstrap] Detected server version: %s\n", version.String())
// Check certificates API. If the server supports the version of the Certificates API we're using, we're good to go
serverGroups, err := client.DiscoveryClient.ServerGroups()
serverGroups, err := client.Discovery().ServerGroups()
if err != nil {
return fmt.Errorf("certificate API check failed: failed to retrieve a list of supported API objects [%v]", err)
}

View File

@@ -35,7 +35,7 @@ import (
)
// CreateEssentialAddons creates the kube-proxy and kube-dns addons
func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {
func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
proxyConfigMapBytes, err := kubeadmutil.ParseTemplate(KubeProxyConfigMap, struct{ MasterEndpoint string }{
// Fetch this value from the kubeconfig file
MasterEndpoint: fmt.Sprintf("https://%s:%d", cfg.API.AdvertiseAddress, cfg.API.BindPort),
@@ -94,7 +94,7 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse
return nil
}
func CreateKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client *clientset.Clientset) error {
func CreateKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client clientset.Interface) error {
kubeproxyConfigMap := &v1.ConfigMap{}
if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), configMapBytes, kubeproxyConfigMap); err != nil {
return fmt.Errorf("unable to decode kube-proxy configmap %v", err)
@@ -127,7 +127,7 @@ func CreateKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client *clients
return nil
}
func CreateKubeDNSAddon(deploymentBytes, serviceBytes []byte, client *clientset.Clientset) error {
func CreateKubeDNSAddon(deploymentBytes, serviceBytes []byte, client clientset.Interface) error {
kubednsDeployment := &extensions.Deployment{}
if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), deploymentBytes, kubednsDeployment); err != nil {
return fmt.Errorf("unable to decode kube-dns deployment %v", err)
@@ -164,7 +164,7 @@ func CreateKubeDNSAddon(deploymentBytes, serviceBytes []byte, client *clientset.
}
// getDNSIP fetches the kubernetes service's ClusterIP and appends a "0" to it in order to get the DNS IP
func getDNSIP(client *clientset.Clientset) (net.IP, error) {
func getDNSIP(client clientset.Interface) (net.IP, error) {
k8ssvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("couldn't fetch information about the kubernetes service: %v", err)

View File

@@ -74,7 +74,7 @@ func CreateServiceAccounts(clientset clientset.Interface) error {
}
// CreateRBACRules creates the essential RBAC rules for a minimally set-up cluster
func CreateRBACRules(clientset *clientset.Clientset, k8sVersion *version.Version) error {
func CreateRBACRules(clientset clientset.Interface, k8sVersion *version.Version) error {
if err := createRoles(clientset); err != nil {
return err
}
@@ -95,7 +95,7 @@ func CreateRBACRules(clientset *clientset.Clientset, k8sVersion *version.Version
return nil
}
func createRoles(clientset *clientset.Clientset) error {
func createRoles(clientset clientset.Interface) error {
roles := []rbac.Role{
{
ObjectMeta: metav1.ObjectMeta{
@@ -121,7 +121,7 @@ func createRoles(clientset *clientset.Clientset) error {
return nil
}
func createRoleBindings(clientset *clientset.Clientset) error {
func createRoleBindings(clientset clientset.Interface) error {
roleBindings := []rbac.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
@@ -156,7 +156,7 @@ func createRoleBindings(clientset *clientset.Clientset) error {
return nil
}
func createClusterRoles(clientset *clientset.Clientset) error {
func createClusterRoles(clientset clientset.Interface) error {
clusterRoles := []rbac.ClusterRole{
{
ObjectMeta: metav1.ObjectMeta{
@@ -182,7 +182,7 @@ func createClusterRoles(clientset *clientset.Clientset) error {
return nil
}
func createClusterRoleBindings(clientset *clientset.Clientset) error {
func createClusterRoleBindings(clientset clientset.Interface) error {
clusterRoleBindings := []rbac.ClusterRoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
@@ -249,7 +249,7 @@ func createClusterRoleBindings(clientset *clientset.Clientset) error {
return nil
}
func deletePermissiveNodesBindingWhenUsingNodeAuthorization(clientset *clientset.Clientset, k8sVersion *version.Version) error {
func deletePermissiveNodesBindingWhenUsingNodeAuthorization(clientset clientset.Interface, k8sVersion *version.Version) error {
nodesRoleBinding, err := clientset.RbacV1beta1().ClusterRoleBindings().Get(kubeadmconstants.NodesClusterRoleBinding, metav1.GetOptions{})
if err != nil {

View File

@@ -21,6 +21,7 @@ go_test(
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
],

Some files were not shown because too many files have changed in this diff.