Register the kubelet on the master node with an apiserver. This option is
separated from the apiserver running locally on the master node so that it
can be optionally enabled or disabled as needed.

Also, fix the healthchecking configuration for the master components, which
was previously only working by coincidence:

If a kubelet doesn't register with a master, it never bothers to figure out
what its local address is, in which case it ends up constructing a URL like
http://:8080/healthz for the http probe. This happens to work on the master
because all of the pods are using host networking and explicitly binding to
127.0.0.1. Once the kubelet is registered with the master and it determines
the local node address, it tries to healthcheck on an address where the pod
isn't listening and the kubelet periodically restarts each master component
when the liveness probe fails.
This commit is contained in:
Robert Bailey
2015-08-04 11:14:46 -07:00
parent 1407aee8b0
commit 8df33bc1a7
17 changed files with 140 additions and 61 deletions

View File

@@ -1,7 +1,10 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {"name":"etcd-server"},
"metadata": {
"name":"etcd-server",
"namespace": "kube-system"
},
"spec":{
"hostNetwork": true,
"containers":[

View File

@@ -99,7 +99,10 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {"name":"kube-apiserver"},
"metadata": {
"name":"kube-apiserver",
"namespace": "kube-system"
},
"spec":{
"hostNetwork": true,
"containers":[
@@ -118,8 +121,9 @@
],
"livenessProbe": {
"httpGet": {
"path": "/healthz",
"port": 8080
"host": "127.0.0.1",
"port": 8080,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15

View File

@@ -44,7 +44,10 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {"name":"kube-controller-manager"},
"metadata": {
"name":"kube-controller-manager",
"namespace": "kube-system"
},
"spec":{
"hostNetwork": true,
"containers":[
@@ -63,8 +66,9 @@
],
"livenessProbe": {
"httpGet": {
"path": "/healthz",
"port": 10252
"host": "127.0.0.1",
"port": 10252,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15

View File

@@ -8,7 +8,10 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {"name":"kube-scheduler"},
"metadata": {
"name":"kube-scheduler",
"namespace": "kube-system"
},
"spec":{
"hostNetwork": true,
"containers":[
@@ -27,8 +30,9 @@
],
"livenessProbe": {
"httpGet": {
"path": "/healthz",
"port": 10251
"host": "127.0.0.1",
"port": 10251,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15

View File

@@ -22,15 +22,22 @@
{% set api_servers_with_port = api_servers + ":6443" -%}
{% endif -%}
# Disable registration for the kubelet running on the master on AWS, GCE, Vagrant. Also disable
# the debugging handlers (/run and /exec) to prevent arbitrary code execution on
# the master.
# TODO(roberthbailey): Make this configurable via an env var in config-default.sh
{% set debugging_handlers = "--enable-debugging-handlers=true" -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant'] -%}
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% set api_servers_with_port = "" -%}
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant'] -%}
# Unless given a specific directive, disable registration for the kubelet
# running on the master.
{% if grains.kubelet_api_servers is defined -%}
{% set api_servers_with_port = "--api_servers=https://" + grains.kubelet_api_servers -%}
{% else -%}
{% set api_servers_with_port = "" -%}
{% endif -%}
# Disable the debugging handlers (/run and /exec) to prevent arbitrary
# code execution on the master.
# TODO(roberthbailey): Relax this constraint once the master is self-hosted.
{% set debugging_handlers = "--enable-debugging-handlers=false" -%}
{% endif -%}
{% endif -%}
@@ -88,7 +95,7 @@
{% set pod_cidr = "" %}
{% if grains['roles'][0] == 'kubernetes-master' and grains.get('cbr-cidr') %}
{% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %}
{% endif %}
{% endif %}
{% set test_args = "" -%}
{% if pillar['kubelet_test_args'] is defined -%}

View File

@@ -19,9 +19,9 @@
- group: root
- mode: 755
# The default here is that this file is blank. If this is the case, the kubelet
# won't be able to parse it as JSON and will try to use the kubernetes_auth file
# instead. You'll see a single error line in the kubelet start up file
# The default here is that this file is blank. If this is the case, the kubelet
# won't be able to parse it as JSON and it will not be able to publish events
# to the apiserver. You'll see a single error line in the kubelet start up file
# about this.
/var/lib/kubelet/kubeconfig:
file.managed:
@@ -31,19 +31,6 @@
- mode: 400
- makedirs: true
#
# --- This file is DEPRECATED ---
# The default here is that this file is blank. If this is the case, the kubelet
# won't be able to parse it as JSON and it'll not be able to publish events to
# the apiserver. You'll see a single error line in the kubelet start up file
# about this.
/var/lib/kubelet/kubernetes_auth:
file.managed:
- source: salt://kubelet/kubernetes_auth
- user: root
- group: root
- mode: 400
- makedirs: true
{% if pillar.get('is_systemd') %}
@@ -64,7 +51,7 @@ fix-service-kubelet:
- file: /usr/local/bin/kubelet
- file: {{ pillar.get('systemd_system_path') }}/kubelet.service
- file: {{ environment_file }}
- file: /var/lib/kubelet/kubernetes_auth
- file: /var/lib/kubelet/kubeconfig
{% else %}
@@ -91,4 +78,4 @@ kubelet:
- file: /usr/lib/systemd/system/kubelet.service
{% endif %}
- file: {{ environment_file }}
- file: /var/lib/kubelet/kubernetes_auth
- file: /var/lib/kubelet/kubeconfig