Merge branch 'master' of github.com:GoogleCloudPlatform/kubernetes into add-charms
@@ -1,34 +0,0 @@
# Makefile for launching synthetic logging sources (any platform)
# and for reporting the forwarding rules for the
# Elasticsearch and Kibana pods for the GCE platform.

.PHONY: up down logger-up logger-down logger10-up logger10-down get net

KUBECTL=../../../kubectl.sh

up: logger-up logger10-up

down: logger-down logger10-down

logger-up:
	-${KUBECTL} create -f synthetic_0_25lps.yaml

logger-down:
	-${KUBECTL} delete pods synthetic-logger-0.25lps-pod

logger10-up:
	-${KUBECTL} create -f synthetic_10lps.yaml

logger10-down:
	-${KUBECTL} delete pods synthetic-logger-10lps-pod

get:
	${KUBECTL} get pods
	${KUBECTL} get replicationControllers
	${KUBECTL} get services

net:
	${KUBECTL} get services elasticsearch-logging -o json
	${KUBECTL} get services kibana-logging -o json
@@ -1,164 +0,0 @@
# Elasticsearch/Kibana Logging Demonstration
This directory contains two pod specifications which can be used as synthetic
logging sources. The pod specification in [synthetic_0_25lps.yaml](synthetic_0_25lps.yaml)
describes a pod that just emits a log message once every 4 seconds:
```
# This pod specification creates an instance of a synthetic logger. The logger
# is simply a program that writes out the hostname of the pod, a count which increments
# by one on each iteration (to help notice missing log entries) and the date using
# a long format (RFC-3339) to nanosecond precision. This program logs at a frequency
# of 0.25 lines per second. The shell script program is given directly to bash as a -c argument
# and could have been written out as:
#   i="0"
#   while true
#   do
#     echo -n "`hostname`: $i: "
#     date --rfc-3339 ns
#     sleep 4
#     i=$[$i+1]
#   done

apiVersion: v1beta1
kind: Pod
id: synthetic-logger-0.25lps-pod
desiredState:
  manifest:
    version: v1beta1
    id: synth-logger-0.25lps
    containers:
      - name: synth-lgr
        image: ubuntu:14.04
        command: ["bash", "-c", "i=\"0\"; while true; do echo -n \"`hostname`: $i: \"; date --rfc-3339 ns; sleep 4; i=$[$i+1]; done"]
labels:
  name: synth-logging-source
```
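
To preview the log format before creating the pod, the same loop from the comment above can be run by hand in a local shell (this is just the command from the pod specification, nothing more):
```
i="0"
while true
do
  echo -n "`hostname`: $i: "
  date --rfc-3339 ns
  sleep 4
  i=$[$i+1]
done
```
Each iteration prints a line of the form `<hostname>: <count>: <RFC-3339 timestamp>`.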

The other YAML file [synthetic_10lps.yaml](synthetic_10lps.yaml) specifies a similar synthetic logger that emits 10 log messages every second. To run both synthetic loggers:
```
$ make up
../../../kubectl.sh create -f synthetic_0_25lps.yaml
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl create -f synthetic_0_25lps.yaml
synthetic-logger-0.25lps-pod
../../../kubectl.sh create -f synthetic_10lps.yaml
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl create -f synthetic_10lps.yaml
synthetic-logger-10lps-pod

```

Visiting the Kibana dashboard should make it clear that logs are being collected from the two synthetic loggers:


You can report the running pods, replication controllers and services with another Makefile rule:
```
$ make get
../../../kubectl.sh get pods
Running: ../../../../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get pods
POD CONTAINER(S) IMAGE(S) HOST LABELS STATUS
7e1c7ce6-9764-11e4-898c-42010af03582 kibana-logging kubernetes/kibana kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 name=kibana-logging Running
synthetic-logger-0.25lps-pod synth-lgr ubuntu:14.04 kubernetes-minion-2.c.kubernetes-elk.internal/146.148.41.87 name=synth-logging-source Running
synthetic-logger-10lps-pod synth-lgr ubuntu:14.04 kubernetes-minion-1.c.kubernetes-elk.internal/146.148.42.44 name=synth-logging-source Running
influx-grafana influxdb kubernetes/heapster_influxdb kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 name=influxdb Running
    grafana kubernetes/heapster_grafana
    elasticsearch elasticsearch
heapster heapster kubernetes/heapster kubernetes-minion-2.c.kubernetes-elk.internal/146.148.41.87 name=heapster Running
67cfcb1f-9764-11e4-898c-42010af03582 etcd quay.io/coreos/etcd:latest kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 k8s-app=skydns Running
    kube2sky kubernetes/kube2sky:1.0
    skydns kubernetes/skydns:2014-12-23-001
6ba20338-9764-11e4-898c-42010af03582 elasticsearch-logging elasticsearch kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 name=elasticsearch-logging Running
../../../cluster/kubectl.sh get replicationControllers
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get replicationControllers
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
skydns etcd quay.io/coreos/etcd:latest k8s-app=skydns 1
    kube2sky kubernetes/kube2sky:1.0
    skydns kubernetes/skydns:2014-12-23-001
elasticsearch-logging-controller elasticsearch-logging elasticsearch name=elasticsearch-logging 1
kibana-logging-controller kibana-logging kubernetes/kibana name=kibana-logging 1
../../../kubectl.sh get services
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get services
NAME LABELS SELECTOR IP PORT
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.0.83.3 80
kubernetes component=apiserver,provider=kubernetes <none> 10.0.79.4 443
influx-master <none> name=influxdb 10.0.232.223 8085
skydns k8s-app=skydns k8s-app=skydns 10.0.0.10 53
elasticsearch-logging <none> name=elasticsearch-logging 10.0.25.103 9200
kibana-logging <none> name=kibana-logging 10.0.208.114 5601

```
The `net` rule in the Makefile will report information about the Elasticsearch and Kibana services including the public IP addresses of each service.
```
$ make net
../../../kubectl.sh get services elasticsearch-logging -o json
current-context: "kubernetes-satnam_kubernetes"
Running: ../../../../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get services elasticsearch-logging -o json
{
    "kind": "Service",
    "id": "elasticsearch-logging",
    "uid": "e5bf0a51-b87f-11e4-bd62-42010af01267",
    "creationTimestamp": "2015-02-19T21:40:18Z",
    "selfLink": "/api/v1beta1/services/elasticsearch-logging?namespace=default",
    "resourceVersion": 68,
    "apiVersion": "v1beta1",
    "namespace": "default",
    "port": 9200,
    "protocol": "TCP",
    "labels": {
        "name": "elasticsearch-logging"
    },
    "selector": {
        "name": "elasticsearch-logging"
    },
    "createExternalLoadBalancer": true,
    "publicIPs": [
        "104.154.81.135"
    ],
    "containerPort": 9200,
    "portalIP": "10.0.58.62",
    "sessionAffinity": "None"
}
../../../kubectl.sh get services kibana-logging -o json
current-context: "kubernetes-satnam_kubernetes"
Running: ../../../../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get services kibana-logging -o json
{
    "kind": "Service",
    "id": "kibana-logging",
    "uid": "e5bd4617-b87f-11e4-bd62-42010af01267",
    "creationTimestamp": "2015-02-19T21:40:18Z",
    "selfLink": "/api/v1beta1/services/kibana-logging?namespace=default",
    "resourceVersion": 67,
    "apiVersion": "v1beta1",
    "namespace": "default",
    "port": 5601,
    "protocol": "TCP",
    "labels": {
        "name": "kibana-logging"
    },
    "selector": {
        "name": "kibana-logging"
    },
    "createExternalLoadBalancer": true,
    "publicIPs": [
        "104.154.91.224"
    ],
    "containerPort": 80,
    "portalIP": "10.0.124.153",
    "sessionAffinity": "None"
}
```
For this example the Elasticsearch service is running at `http://104.154.81.135:9200`.
```
$ curl http://104.154.81.135:9200
{
  "status" : 200,
  "name" : "Wombat",
  "cluster_name" : "elasticsearch",
  "version" : {
    "number" : "1.4.4",
    "build_hash" : "c88f77ffc81301dfa9dfd81ca2232f09588bd512",
    "build_timestamp" : "2015-02-19T13:05:36Z",
    "build_snapshot" : false,
    "lucene_version" : "4.10.3"
  },
  "tagline" : "You Know, for Search"
}
```
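
If you would rather script the address lookup than read the JSON by eye, a small helper along these lines should work; it is only a sketch, and it assumes the `publicIPs` field shown above and a Python 2 interpreter (the `sed` filter drops any status lines the `kubectl.sh` wrapper prints before the JSON):
```
PUBLIC_IP=$(../../../kubectl.sh get services elasticsearch-logging -o json \
  | sed -n '/^{/,$p' \
  | python -c 'import json,sys; print json.load(sys.stdin)["publicIPs"][0]')
curl "http://${PUBLIC_IP}:9200"
```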

Visiting the URL `http://104.154.91.224:5601` should show the Kibana viewer for the logging information stored in the Elasticsearch service running at `http://104.154.81.135:9200`.
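
Once logs are flowing, one quick way to check that entries from the synthetic loggers have actually been indexed is to query Elasticsearch directly. The query below is only a sketch: it searches across all indices for documents containing the word `synthetic`, and the exact fields returned depend on how the collector formats its records.
```
$ curl 'http://104.154.81.135:9200/_search?q=synthetic*&size=3&pretty=true'
```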
Binary file not shown.
@@ -1,29 +0,0 @@
# This pod specification creates an instance of a synthetic logger. The logger
# is simply a program that writes out the hostname of the pod, a count which increments
# by one on each iteration (to help notice missing log entries) and the date using
# a long format (RFC-3339) to nanosecond precision. This program logs at a frequency
# of 0.25 lines per second. The shell script program is given directly to bash as a -c argument
# and could have been written out as:
#   i="0"
#   while true
#   do
#     echo -n "`hostname`: $i: "
#     date --rfc-3339 ns
#     sleep 4
#     i=$[$i+1]
#   done

apiVersion: v1beta1
kind: Pod
id: synthetic-logger-0.25lps-pod
desiredState:
  manifest:
    version: v1beta1
    id: synth-logger-0.25lps
    containers:
      - name: synth-lgr
        image: ubuntu:14.04
        command: ["bash", "-c", "i=\"0\"; while true; do echo -n \"`hostname`: $i: \"; date --rfc-3339 ns; sleep 4; i=$[$i+1]; done"]
labels:
  name: synth-logging-source
@@ -1,29 +0,0 @@
# This pod specification creates an instance of a synthetic logger. The logger
# is simply a program that writes out the hostname of the pod, a count which increments
# by one on each iteration (to help notice missing log entries) and the date using
# a long format (RFC-3339) to nanosecond precision. This program logs at a frequency
# of 10 lines per second. The shell script program is given directly to bash as a -c argument
# and could have been written out as:
#   i="0"
#   while true
#   do
#     echo -n "`hostname`: $i: "
#     date --rfc-3339 ns
#     sleep 0.1
#     i=$[$i+1]
#   done

apiVersion: v1beta1
kind: Pod
id: synthetic-logger-10lps-pod
desiredState:
  manifest:
    version: v1beta1
    id: synth-logger-10lps
    containers:
      - name: synth-lgr
        image: ubuntu:14.04
        command: ["bash", "-c", "i=\"0\"; while true; do echo -n \"`hostname`: $i: \"; date --rfc-3339 ns; sleep 0.1; i=$[$i+1]; done"]
labels:
  name: synth-logging-source
cluster/addons/fluentd-gcp/fluentd-gcp-image/Dockerfile (new file)
@@ -0,0 +1,25 @@
# This Dockerfile will build an image that is configured
# to use Fluentd to collect all Docker container log files
# and then cause them to be ingested using the Google Cloud
# Logging API. This configuration assumes that the host performing
# the collection is a VM that has been created with a logging.write
# scope and that the Logging API has been enabled for the project
# in the Google Developer Console.

FROM ubuntu:14.04
MAINTAINER Satnam Singh "satnam@google.com"

# Disable prompts from apt.
ENV DEBIAN_FRONTEND noninteractive
ENV OPTS_APT -y --force-yes --no-install-recommends

RUN apt-get -q update && \
    apt-get -y install curl && \
    apt-get clean && \
    curl -s https://storage.googleapis.com/signals-agents/logging/google-fluentd-install.sh | sudo bash

# Copy the Fluentd configuration file for logging Docker container logs.
COPY google-fluentd.conf /etc/google-fluentd/google-fluentd.conf

# Start Fluentd to pick up our config that watches Docker container logs.
CMD /usr/sbin/google-fluentd -qq > /var/log/google-fluentd.log
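
The logging.write scope mentioned in the Dockerfile header matters: without it, the Cloud Logging API calls made by the agent will be rejected. As a rough sketch (the instance name and zone are placeholders, not taken from this repository), a GCE VM with that scope can be created with:
```
gcloud compute instances create logging-test-vm \
  --zone us-central1-b \
  --scopes https://www.googleapis.com/auth/logging.write
```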
cluster/addons/fluentd-gcp/fluentd-gcp-image/Makefile (new file)
@@ -0,0 +1,16 @@
# The build rule builds a Docker image that sends all Docker container logs to
# Google Cloud Platform using the Cloud Logging API. The push rule pushes
# the image to DockerHub.
# Satnam Singh (satnam@google.com)

.PHONY: build push

TAG = 1.2

build:
	docker build -t gcr.io/google_containers/fluentd-gcp:$(TAG) .

push:
	gcloud preview docker push gcr.io/google_containers/fluentd-gcp:$(TAG)
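
Typical usage is simply the two rules in sequence; this assumes a local Docker daemon and a `gcloud` installation authenticated against a project with push access to the registry named in the Makefile:
```
$ make build
$ make push
```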
cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md (new file)
@@ -0,0 +1,8 @@
# Collecting Docker Log Files with Fluentd and Sending to GCP
This directory contains the source files needed to make a Docker image
that collects Docker container log files using [Fluentd](http://www.fluentd.org/)
and sends them to GCP.
This image is designed to be used as part of the [Kubernetes](https://github.com/GoogleCloudPlatform/kubernetes)
cluster bring-up process. The image resides at DockerHub under the name
[kubernetes/fluentd-gcp](https://registry.hub.docker.com/u/kubernetes/fluentd-gcp/).
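
For a quick manual test outside of the normal cluster bring-up flow, the image can also be run directly on a Docker host, bind-mounting the directories that the Fluentd configuration tails. The tag and mount points below are assumptions taken from the Makefile and google-fluentd.conf in this directory, not an officially documented invocation:
```
docker run -d \
  -v /var/lib/docker/containers:/var/lib/docker/containers \
  -v /var/log:/varlog \
  gcr.io/google_containers/fluentd-gcp:1.2
```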
@@ -0,0 +1,51 @@
# This Fluentd configuration file specifies the collection
# of all Docker container log files under /var/lib/docker/containers/...
# followed by ingestion using the Google Cloud Logging API.
# This configuration assumes the correct installation of the
# Google fluentd plug-in. Currently the collector uses a text format
# rather than JSON (which is the format used to store the Docker
# log files). When the fluentd plug-in can accept JSON this
# configuration file should be changed by specifying:
#   format json
# in the source section.
# This configuration file assumes that the VM host running
# this configuration has been created with a logging.write scope.
# Maintainer: Satnam Singh (satnam@google.com)

<source>
  type tail
  format none
  time_key time
  path /var/lib/docker/containers/*/*-json.log
  pos_file /var/lib/docker/containers/gcp-containers.log.pos
  time_format %Y-%m-%dT%H:%M:%S
  tag docker.*
  read_from_head true
</source>

<match docker.**>
  type google_cloud
  flush_interval 5s
  # Never wait longer than 5 minutes between retries.
  max_retry_wait 300
  # Disable the limit on the number of retries (retry forever).
  disable_retry_limit
</match>

<source>
  type tail
  format none
  time_key time
  path /varlog/kubelet.log
  pos_file /varlog/gcp-kubelet.log.pos
  tag kubelet
</source>

<match kubelet>
  type google_cloud
  flush_interval 5s
  # Never wait longer than 5 minutes between retries.
  max_retry_wait 300
  # Disable the limit on the number of retries (retry forever).
  disable_retry_limit
</match>
@@ -67,5 +67,13 @@ else
  ln -s /mnt/docker /var/lib/docker
  DOCKER_ROOT="/mnt/docker"
  DOCKER_OPTS="${DOCKER_OPTS} -g /mnt/docker"

  # Move /var/lib/kubelet to /mnt if we have it
  # (the backing for empty-dir volumes can use a lot of space!)
  if [[ -d /var/lib/kubelet ]]; then
    mv /var/lib/kubelet /mnt/
  fi
  mkdir -p /mnt/kubelet
  ln -s /mnt/kubelet /var/lib/kubelet
fi

@@ -58,5 +58,5 @@ EOF
#
# -M installs the master
set +x
curl -L --connect-timeout 20 --retry 6 --retry-delay 10 http://bootstrap.saltstack.com | sh -s -- -M -X
curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -X
set -x

@@ -20,6 +20,7 @@
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/aws/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"

# This removes the final character in bash (somehow)
AWS_REGION=${ZONE%?}
@@ -265,7 +266,7 @@ function upload-server-tars() {


# Ensure that we have a password created for validating to the master. Will
# read from the kubernetes auth-file for the current context if available.
# read from kubeconfig for the current context if available.
#
# Assumed vars
#   KUBE_ROOT
@@ -274,17 +275,11 @@ function upload-server-tars() {
#   KUBE_USER
#   KUBE_PASSWORD
function get-password {
  # go template to extract the auth-path of the current-context user
  # Note: we save dot ('.') to $dot because the 'with' action overrides dot
  local template='{{$dot := .}}{{with $ctx := index $dot "current-context"}}{{range $element := (index $dot "contexts")}}{{ if eq .name $ctx }}{{ with $user := .context.user }}{{range $element := (index $dot "users")}}{{ if eq .name $user }}{{ index . "user" "auth-path" }}{{end}}{{end}}{{end}}{{end}}{{end}}{{end}}'
  local file=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o template --template="${template}")
  if [[ ! -z "$file" && -r "$file" ]]; then
    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
    return
  get-kubeconfig-basicauth
  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
    KUBE_USER=admin
    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
  fi
  KUBE_USER=admin
  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
}

# Adds a tag to an AWS resource
@@ -609,44 +604,23 @@ function kube-up {

  echo "Kubernetes cluster created."

  local kube_cert="kubecfg.crt"
  local kube_key="kubecfg.key"
  local ca_cert="kubernetes.ca.crt"
  # TODO use token instead of kube_auth
  local kube_auth="kubernetes_auth"
  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  export CONTEXT="aws_${INSTANCE_PREFIX}"

  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  local context="${INSTANCE_PREFIX}"
  local user="${INSTANCE_PREFIX}-admin"
  local config_dir="${HOME}/.kube/${context}"

  # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
  # config file. Distribute the same way the htpasswd is done.
  (
    mkdir -p "${config_dir}"
    umask 077
    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/kubecfg.crt >"${config_dir}/${kube_cert}" 2>$LOG
    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/kubecfg.key >"${config_dir}/${kube_key}" 2>$LOG
    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/ca.crt >"${config_dir}/${ca_cert}" 2>$LOG
    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>"$LOG"
    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>"$LOG"
    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>"$LOG"

    "${kubectl}" config set-cluster "${context}" --server="https://${KUBE_MASTER_IP}" --certificate-authority="${config_dir}/${ca_cert}" --global
    "${kubectl}" config set-credentials "${user}" --auth-path="${config_dir}/${kube_auth}" --global
    "${kubectl}" config set-context "${context}" --cluster="${context}" --user="${user}" --global
    "${kubectl}" config use-context "${context}" --global

    cat << EOF > "${config_dir}/${kube_auth}"
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD",
  "CAFile": "${config_dir}/${ca_cert}",
  "CertFile": "${config_dir}/${kube_cert}",
  "KeyFile": "${config_dir}/${kube_key}"
}
EOF

    chmod 0600 "${config_dir}/${kube_auth}" "${config_dir}/$kube_cert" \
      "${config_dir}/${kube_key}" "${config_dir}/${ca_cert}"
    echo "Wrote ${config_dir}/${kube_auth}"
    create-kubeconfig
  )

  echo "Sanity checking cluster..."
@@ -700,7 +674,7 @@ EOF
  echo
  echo -e "${color_yellow} https://${KUBE_MASTER_IP}"
  echo
  echo -e "${color_green}The user name and password to use is located in ${config_dir}/${kube_auth}${color_norm}"
  echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}"
  echo
}

@@ -31,7 +31,7 @@ log_level: debug
log_level_logfile: debug
EOF

hostnamef=$(hostname -f)
hostnamef=$(uname -n)
apt-get install -y ipcalc
netmask=$(ipcalc $MINION_IP_RANGE | grep Netmask | awk '{ print $2 }')
network=$(ipcalc $MINION_IP_RANGE | grep Address | awk '{ print $2 }')
@@ -21,6 +21,7 @@

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/azure/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"

function azure_call {
  local -a params=()
@@ -242,30 +243,17 @@ function detect-master () {
}

# Ensure that we have a password created for validating to the master. Will
# read from $HOME/.kubernetes_auth if available.
# read from kubeconfig current-context if available.
#
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
function get-password {
  local file="$HOME/.kubernetes_auth"
  if [[ -r "$file" ]]; then
    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
    return
  fi
  get-kubeconfig-basicauth
  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
    KUBE_USER=admin
    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')

    # Remove this code, since in all use cases I can see, we are overwriting this
    # at cluster creation time.
    cat << EOF > "$file"
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD"
}
EOF
    chmod 0600 "$file"
  fi
}

# Generate authentication token for admin user. Will
@@ -432,32 +420,22 @@ function kube-up {

  printf "\n"
  echo "Kubernetes cluster created."

  local kube_cert=".kubecfg.crt"
  local kube_key=".kubecfg.key"
  local ca_cert=".kubernetes.ca.crt"
  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  export CONTEXT="azure_${INSTANCE_PREFIX}"

  # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
  # config file. Distribute the same way the htpasswd is done.
  (umask 077
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
      sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
      sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
      sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
      sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
      sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
      sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null

    cat << EOF > ~/.kubernetes_auth
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD",
  "CAFile": "$HOME/$ca_cert",
  "CertFile": "$HOME/$kube_cert",
  "KeyFile": "$HOME/$kube_key"
}
EOF

    chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
      "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
    create-kubeconfig
  )

  # Wait for salt on the minions
@@ -482,7 +460,7 @@ EOF
  echo
  echo " https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ~/.kubernetes_auth."
  echo "The user name and password to use is located in ${KUBECONFIG}."
  echo
}

@@ -22,33 +22,54 @@ set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..

DEFAULT_KUBECONFIG="${HOME}/.kube/config"

# Generate kubeconfig data for the created cluster.
# Assumed vars:
#   KUBE_USER
#   KUBE_PASSWORD
#   KUBE_MASTER_IP
#   KUBECONFIG
#   CONTEXT
#
# The following can be omitted for --insecure-skip-tls-verify
#   KUBE_CERT
#   KUBE_KEY
#   CA_CERT
#   CONTEXT
function create-kubeconfig() {
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"

  export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
  # KUBECONFIG determines the file we write to, but it may not exist yet
  if [[ ! -e "${KUBECONFIG}" ]]; then
    mkdir -p $(dirname "${KUBECONFIG}")
    touch "${KUBECONFIG}"
  fi
  "${kubectl}" config set-cluster "${CONTEXT}" --server="https://${KUBE_MASTER_IP}" \
    --certificate-authority="${CA_CERT}" \
    --embed-certs=true
  "${kubectl}" config set-credentials "${CONTEXT}" --username="${KUBE_USER}" \
    --password="${KUBE_PASSWORD}" \
    --client-certificate="${KUBE_CERT}" \
    --client-key="${KUBE_KEY}" \
    --embed-certs=true
  local cluster_args=(
    "--server=${KUBE_SERVER:-https://${KUBE_MASTER_IP}}"
  )
  if [[ -z "${CA_CERT:-}" ]]; then
    cluster_args+=("--insecure-skip-tls-verify=true")
  else
    cluster_args+=(
      "--certificate-authority=${CA_CERT}"
      "--embed-certs=true"
    )
  fi
  local user_args=(
    "--username=${KUBE_USER}"
    "--password=${KUBE_PASSWORD}"
  )
  if [[ ! -z "${KUBE_CERT:-}" && ! -z "${KUBE_KEY:-}" ]]; then
    user_args+=(
      "--client-certificate=${KUBE_CERT}"
      "--client-key=${KUBE_KEY}"
      "--embed-certs=true"
    )
  fi

  "${kubectl}" config set-cluster "${CONTEXT}" "${cluster_args[@]}"
  "${kubectl}" config set-credentials "${CONTEXT}" "${user_args[@]}"
  "${kubectl}" config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}"
  "${kubectl}" config use-context "${CONTEXT}" --cluster="${CONTEXT}"

@@ -60,6 +81,7 @@ function create-kubeconfig() {
#   KUBECONFIG
#   CONTEXT
function clear-kubeconfig() {
  export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  "${kubectl}" config unset "clusters.${CONTEXT}"
  "${kubectl}" config unset "users.${CONTEXT}"
@@ -85,6 +107,7 @@ function clear-kubeconfig() {
# KUBE_USER,KUBE_PASSWORD will be empty if no current-context is set, or
# the current-context user does not exist or contain basicauth entries.
function get-kubeconfig-basicauth() {
  export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
  # Templates to safely extract the username,password for the current-context
  # user. The long chain of 'with' commands avoids indexing nil if any of the
  # entries ("current-context", "contexts"."current-context", "users", etc)
@@ -63,7 +63,7 @@ function increment_ipv4 {
}

node_count="${NUM_MINIONS}"
next_node="10.244.0.0"
next_node="${KUBE_GCE_CLUSTER_CLASS_B:-10.244}.0.0"
node_subnet_size=24
node_subnet_count=$((2 ** (32-$node_subnet_size)))
subnets=()
@@ -73,7 +73,7 @@ for ((node_num=0; node_num<node_count; node_num++)); do
  next_node=$(increment_ipv4 $next_node $node_subnet_count)
done

CLUSTER_IP_RANGE="10.244.0.0/16"
CLUSTER_IP_RANGE="${KUBE_GCE_CLUSTER_CLASS_B:-10.244}.0.0/16"
MINION_IP_RANGES=($(eval echo "${subnets[@]}"))

MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/monitoring")
@@ -33,10 +33,9 @@ INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="10.245.0.0/16"
MINION_IP_RANGES=($(eval echo "10.245.{1..${NUM_MINIONS}}.0/24"))
CLUSTER_IP_RANGE="${KUBE_GCE_CLUSTER_CLASS_B:-10.245}.0.0/16"
MINION_IP_RANGES=($(eval echo "${KUBE_GCE_CLUSTER_CLASS_B:-10.245}.{1..${NUM_MINIONS}}.0/24"))
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"

MINION_SCOPES=("storage-ro" "compute-rw")
# Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
POLL_SLEEP_INTERVAL=3
@@ -134,7 +134,9 @@ install-salt() {
|
||||
URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"
|
||||
|
||||
for deb in "${DEBS[@]}"; do
|
||||
download-or-bust "${URL_BASE}/${deb}"
|
||||
if [ ! -e "${deb}" ]; then
|
||||
download-or-bust "${URL_BASE}/${deb}"
|
||||
fi
|
||||
done
|
||||
|
||||
# Based on
|
||||
@@ -152,7 +154,7 @@ EOF
|
||||
|
||||
for deb in "${DEBS[@]}"; do
|
||||
echo "== Installing ${deb}, ignore dependency complaints (will fix later) =="
|
||||
dpkg --force-depends -i "${deb}"
|
||||
dpkg --skip-same-version --force-depends -i "${deb}"
|
||||
done
|
||||
|
||||
# This will install any of the unmet dependencies from above.
|
||||
@@ -172,6 +174,7 @@ EOF
|
||||
stop-salt-minion() {
|
||||
# This ensures it on next reboot
|
||||
echo manual > /etc/init/salt-minion.override
|
||||
update-rc.d salt-minion disable
|
||||
|
||||
if service salt-minion status >/dev/null; then
|
||||
echo "salt-minion started in defiance of runlevel policy, aborting startup." >&2
|
||||
@@ -205,18 +208,21 @@ mount-master-pd() {
|
||||
mkdir -p /mnt/master-pd/srv/kubernetes
|
||||
# Contains the cluster's initial config parameters and auth tokens
|
||||
mkdir -p /mnt/master-pd/srv/salt-overlay
|
||||
ln -s /mnt/master-pd/var/etcd /var/etcd
|
||||
ln -s /mnt/master-pd/srv/kubernetes /srv/kubernetes
|
||||
ln -s /mnt/master-pd/srv/salt-overlay /srv/salt-overlay
|
||||
|
||||
ln -s -f /mnt/master-pd/var/etcd /var/etcd
|
||||
ln -s -f /mnt/master-pd/srv/kubernetes /srv/kubernetes
|
||||
ln -s -f /mnt/master-pd/srv/salt-overlay /srv/salt-overlay
|
||||
|
||||
# This is a bit of a hack to get around the fact that salt has to run after the
|
||||
# PD and mounted directory are already set up. We can't give ownership of the
|
||||
# directory to etcd until the etcd user and group exist, but they don't exist
|
||||
# until salt runs if we don't create them here. We could alternatively make the
|
||||
# permissions on the directory more permissive, but this seems less bad.
|
||||
useradd -s /sbin/nologin -d /var/etcd etcd
|
||||
chown etcd /mnt/master-pd/var/etcd
|
||||
chgrp etcd /mnt/master-pd/var/etcd
|
||||
if ! id etcd &>/dev/null; then
|
||||
useradd -s /sbin/nologin -d /var/etcd etcd
|
||||
fi
|
||||
chown -R etcd /mnt/master-pd/var/etcd
|
||||
chgrp -R etcd /mnt/master-pd/var/etcd
|
||||
}
|
||||
|
||||
# Create the overlay files for the salt tree. We create these in a separate
|
||||
@@ -282,6 +288,14 @@ function create-salt-auth() {
|
||||
}
|
||||
|
||||
function download-release() {
|
||||
# TODO(zmerlynn): We should optimize for the reboot case here, but
|
||||
# unlike the .debs, we don't have version information in the
|
||||
# filenames here, nor do the URLs even provide useful information in
|
||||
# the dev environment case (because they're just a project
|
||||
# bucket). We should probably push a hash into the kube-env, and
|
||||
# store it when we download, and then when it's different infer that
|
||||
# a push occurred (otherwise it's a simple reboot).
|
||||
|
||||
echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)"
|
||||
download-or-bust "$SERVER_BINARY_TAR_URL"
|
||||
|
||||
|
@@ -390,20 +390,23 @@ function create-node-template {
|
||||
|
||||
# Robustly try to add metadata on an instance.
|
||||
# $1: The name of the instance.
|
||||
# $2: The metadata key=value pair to add.
|
||||
# $2...$n: The metadata key=value pairs to add.
|
||||
function add-instance-metadata {
|
||||
local -r instance=$1
|
||||
shift 1
|
||||
local -r kvs=( "$@" )
|
||||
detect-project
|
||||
local attempt=0
|
||||
while true; do
|
||||
if ! gcloud compute instances add-metadata "$1" \
|
||||
if ! gcloud compute instances add-metadata "${instance}" \
|
||||
--project "${PROJECT}" \
|
||||
--zone "${ZONE}" \
|
||||
--metadata "$2"; then
|
||||
--metadata "${kvs[@]}"; then
|
||||
if (( attempt > 5 )); then
|
||||
echo -e "${color_red}Failed to add instance metadata in $1 ${color_norm}"
|
||||
echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}"
|
||||
exit 2
|
||||
fi
|
||||
echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in $1. Retrying.${color_norm}"
|
||||
echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}"
|
||||
attempt=$(($attempt+1))
|
||||
else
|
||||
break
|
||||
@@ -412,21 +415,25 @@ function add-instance-metadata {
|
||||
}
|
||||
|
||||
# Robustly try to add metadata on an instance, from a file.
|
||||
# $1: The name of the instance.
|
||||
# $2: The metadata key=file pair to add.
|
||||
# $1: The name of the instance.
|
||||
# $2...$n: The metadata key=file pairs to add.
|
||||
function add-instance-metadata-from-file {
|
||||
local -r instance=$1
|
||||
shift 1
|
||||
local -r kvs=( "$@" )
|
||||
detect-project
|
||||
local attempt=0
|
||||
while true; do
|
||||
if ! gcloud compute instances add-metadata "$1" \
|
||||
echo "${kvs[@]}"
|
||||
if ! gcloud compute instances add-metadata "${instance}" \
|
||||
--project "${PROJECT}" \
|
||||
--zone "${ZONE}" \
|
||||
--metadata-from-file "$2"; then
|
||||
--metadata-from-file "${kvs[@]}"; then
|
||||
if (( attempt > 5 )); then
|
||||
echo -e "${color_red}Failed to add instance metadata in $1 ${color_norm}"
|
||||
echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}"
|
||||
exit 2
|
||||
fi
|
||||
echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in $1. Retrying.${color_norm}"
|
||||
echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}"
|
||||
attempt=$(($attempt+1))
|
||||
else
|
||||
break
|
||||
@@ -584,7 +591,16 @@ function kube-up {
|
||||
# https://github.com/GoogleCloudPlatform/kubernetes/issues/3168
|
||||
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
|
||||
|
||||
create-master-instance &
|
||||
# Reserve the master's IP so that it can later be transferred to another VM
|
||||
# without disrupting the kubelets. IPs are associated with regions, not zones,
|
||||
# so extract the region name, which is the same as the zone but with the final
|
||||
# dash and characters trailing the dash removed.
|
||||
local REGION=${ZONE%-*}
|
||||
MASTER_RESERVED_IP=$(gcloud compute addresses create "${MASTER_NAME}-ip" \
|
||||
--project "${PROJECT}" \
|
||||
--region "${REGION}" -q --format yaml | awk '/^address:/ { print $2 }')
|
||||
|
||||
create-master-instance $MASTER_RESERVED_IP &
|
||||
|
||||
# Create a single firewall rule for all minions.
|
||||
create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}" &
|
||||
@@ -647,16 +663,6 @@ function kube-up {
|
||||
|
||||
detect-master
|
||||
|
||||
# Reserve the master's IP so that it can later be transferred to another VM
|
||||
# without disrupting the kubelets. IPs are associated with regions, not zones,
|
||||
# so extract the region name, which is the same as the zone but with the final
|
||||
# dash and characters trailing the dash removed.
|
||||
local REGION=${ZONE%-*}
|
||||
gcloud compute addresses create "${MASTER_NAME}-ip" \
|
||||
--project "${PROJECT}" \
|
||||
--addresses "${KUBE_MASTER_IP}" \
|
||||
--region "${REGION}"
|
||||
|
||||
echo "Waiting for cluster initialization."
|
||||
echo
|
||||
echo " This will continually check to see if the API for kubernetes is reachable."
|
||||
@@ -673,10 +679,9 @@ function kube-up {
|
||||
echo "Kubernetes cluster created."
|
||||
|
||||
# TODO use token instead of basic auth
|
||||
export KUBECONFIG="${HOME}/.kube/.kubeconfig"
|
||||
export KUBE_CERT="/tmp/kubecfg.crt"
|
||||
export KUBE_KEY="/tmp/kubecfg.key"
|
||||
export CA_CERT="/tmp/kubernetes.ca.crt"
|
||||
export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
|
||||
export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
|
||||
export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
|
||||
export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
|
||||
|
||||
# TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
|
||||
@@ -832,7 +837,6 @@ function kube-down {
|
||||
--quiet \
|
||||
"${MASTER_NAME}-ip" || true
|
||||
|
||||
export KUBECONFIG="${HOME}/.kube/.kubeconfig"
|
||||
export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
|
||||
clear-kubeconfig
|
||||
}
|
||||
@@ -853,8 +857,10 @@ function kube-push {
|
||||
find-release-tars
|
||||
upload-server-tars
|
||||
|
||||
echo "Updating master metadata ..."
|
||||
write-master-env
|
||||
add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml"
|
||||
add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml" "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh"
|
||||
|
||||
echo "Pushing to master (log at ${OUTPUT}/kube-push-${KUBE_MASTER}.log) ..."
|
||||
cat ${KUBE_ROOT}/cluster/gce/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${KUBE_MASTER}" --command "sudo bash -s -- --push" &> ${OUTPUT}/kube-push-"${KUBE_MASTER}".log
|
||||
|
||||
@@ -899,7 +905,7 @@ function kube-update-nodes() {
|
||||
echo "Updating node metadata... "
|
||||
write-node-env
|
||||
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
|
||||
add-instance-metadata-from-file "${MINION_NAMES[$i]}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" &
|
||||
add-instance-metadata-from-file "${MINION_NAMES[$i]}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" &
|
||||
done
|
||||
wait-for-jobs
|
||||
echo "Done"
|
||||
@@ -975,7 +981,7 @@ function restart-kube-proxy {
|
||||
|
||||
# Restart the kube-apiserver on a node ($1)
|
||||
function restart-apiserver {
|
||||
ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
|
||||
ssh-to-node "$1" "sudo docker kill `sudo docker ps | grep /kube-apiserver | awk '{print $1}'`"
|
||||
}
|
||||
|
||||
# Perform preparations required to run e2e tests
|
||||
|
@@ -251,7 +251,7 @@ function restart-kube-proxy() {
|
||||
# Restart the kube-proxy on master ($1)
|
||||
function restart-apiserver() {
|
||||
echo "... in restart-kube-apiserver()" >&2
|
||||
ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
|
||||
ssh-to-node "$1" "sudo docker kill `sudo docker ps | grep /kube-apiserver | awk '{print $1}'`"
|
||||
}
|
||||
|
||||
# Execute after running tests to perform any required clean-up. This is called
|
||||
|
@@ -1,6 +1,6 @@
# build the hyperkube image.

VERSION=v0.14.2
VERSION=v0.15.0

all:
	curl -O http://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/hyperkube
@@ -100,30 +100,12 @@ elif [[ ! -x "${KUBECTL_PATH}" ]]; then
|
||||
fi
|
||||
kubectl="${KUBECTL_PATH:-${kubectl}}"
|
||||
|
||||
# While GKE requires the kubectl binary, it's actually called through
|
||||
# gcloud. But we need to adjust the PATH so gcloud gets the right one.
|
||||
# GKE stores its kubeconfig in a separate location.
|
||||
if [[ "$KUBERNETES_PROVIDER" == "gke" ]]; then
|
||||
detect-project &> /dev/null
|
||||
export PATH=$(get_absolute_dirname $kubectl):$PATH
|
||||
kubectl="${GCLOUD}"
|
||||
# GKE runs kubectl through gcloud.
|
||||
config=(
|
||||
"alpha"
|
||||
"container"
|
||||
"kubectl"
|
||||
"--project=${PROJECT}"
|
||||
"--zone=${ZONE}"
|
||||
"--cluster=${CLUSTER_NAME}"
|
||||
)
|
||||
elif [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
|
||||
# When we are using vagrant it has a hard-coded kubeconfig, and we do not clobber public endpoints
|
||||
config=(
|
||||
"--kubeconfig=$HOME/.kubernetes_vagrant_kubeconfig"
|
||||
)
|
||||
elif [[ "$KUBERNETES_PROVIDER" == "libvirt-coreos" ]]; then
|
||||
detect-master > /dev/null
|
||||
config=(
|
||||
"--server=http://${KUBE_MASTER_IP}:8080"
|
||||
"--kubeconfig=${HOME}/.config/gcloud/kubernetes/kubeconfig"
|
||||
"--context=gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}"
|
||||
)
|
||||
fi
|
||||
|
||||
|
@@ -18,7 +18,8 @@
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
readonly ROOT=$(dirname "${BASH_SOURCE}")
|
||||
source $ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}
|
||||
source "$ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}"
|
||||
source "$KUBE_ROOT/cluster/common.sh"
|
||||
|
||||
export LIBVIRT_DEFAULT_URI=qemu:///system
|
||||
|
||||
@@ -199,6 +200,7 @@ function wait-cluster-readiness {
|
||||
function kube-up {
|
||||
detect-master
|
||||
detect-minions
|
||||
get-password
|
||||
initialize-pool keep_base_image
|
||||
initialize-network
|
||||
|
||||
@@ -235,12 +237,9 @@ function kube-up {
|
||||
rm $domain_xml
|
||||
done
|
||||
|
||||
export KUBECONFIG="${HOME}/.kube/.kubeconfig"
|
||||
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
|
||||
|
||||
"${kubectl}" config set-cluster libvirt-coreos --server=http://${KUBE_MASTER_IP-}:8080
|
||||
"${kubectl}" config set-context libvirt-coreos --cluster=libvirt-coreos
|
||||
"${kubectl}" config use-context libvirt-coreos --cluster=libvirt-coreos
|
||||
export KUBE_SERVER="http://192.168.10.1:8080"
|
||||
export CONTEXT="libvirt-coreos"
|
||||
create-kubeconfig
|
||||
|
||||
wait-cluster-readiness
|
||||
|
||||
@@ -331,8 +330,8 @@ function test-teardown {
|
||||
|
||||
# Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
|
||||
function get-password {
|
||||
export KUBE_USER=core
|
||||
echo "TODO get-password"
|
||||
export KUBE_USER=''
|
||||
export KUBE_PASSWORD=''
|
||||
}
|
||||
|
||||
# SSH to a node by name or IP ($1) and run a command ($2).
|
||||
|
@@ -20,6 +20,7 @@
|
||||
# config-default.sh.
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}
|
||||
source "${KUBE_ROOT}/cluster/common.sh"
|
||||
|
||||
verify-prereqs() {
|
||||
# Make sure that prerequisites are installed.
|
||||
@@ -50,29 +51,17 @@ verify-prereqs() {
|
||||
}
|
||||
|
||||
# Ensure that we have a password created for validating to the master. Will
|
||||
# read from $HOME/.kubernetes_auth if available.
|
||||
# read from kubeconfig current-context if available.
|
||||
#
|
||||
# Vars set:
|
||||
# KUBE_USER
|
||||
# KUBE_PASSWORD
|
||||
get-password() {
|
||||
local file="$HOME/.kubernetes_auth"
|
||||
if [[ -r "$file" ]]; then
|
||||
KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
|
||||
KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
|
||||
return
|
||||
get-kubeconfig-basicauth
|
||||
if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
|
||||
KUBE_USER=admin
|
||||
KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
|
||||
fi
|
||||
KUBE_USER=admin
|
||||
KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
|
||||
|
||||
# Store password for reuse.
|
||||
cat << EOF > "$file"
|
||||
{
|
||||
"User": "$KUBE_USER",
|
||||
"Password": "$KUBE_PASSWORD"
|
||||
}
|
||||
EOF
|
||||
chmod 0600 "$file"
|
||||
}
|
||||
|
||||
rax-ssh-key() {
|
||||
@@ -329,6 +318,13 @@ kube-up() {
|
||||
|
||||
echo "Kubernetes cluster created."
|
||||
|
||||
export KUBE_CERT=""
|
||||
export KUBE_KEY=""
|
||||
export CA_CERT=""
|
||||
export CONTEXT="rackspace_${INSTANCE_PREFIX}"
|
||||
|
||||
create-kubeconfig
|
||||
|
||||
# Don't bail on errors, we want to be able to print some info.
|
||||
set +e
|
||||
|
||||
|
@@ -36,7 +36,7 @@ if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
|
||||
fi
|
||||
|
||||
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
|
||||
cert_ip=$(hostname -f | awk -F. '{ print $2 }').cloudapp.net
|
||||
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
|
||||
use_cn=true
|
||||
fi
|
||||
|
||||
|
cluster/saltbase/salt/kube-addons/default (new file)
@@ -0,0 +1,14 @@
# TODO(erictune): once we make DNS a hard requirement for clusters, then this can be removed,
# and APISERVER_URL="https://kubernetes:443"
{% if grains.api_servers is defined -%}
  {% set api_server = "https://" + grains.api_servers + ":6443" -%}
{% elif grains.apiservers is defined -%} # TODO(remove after 0.16.0): Deprecated form
  {% set api_server = "https://" + grains.apiservers + ":6443" -%}
{% elif grains['roles'][0] == 'kubernetes-master' -%}
  {% set master_ipv4 = salt['grains.get']('fqdn_ip4')[0] -%}
  {% set api_server = "https://" + master_ipv4 + ":6443" -%}
{% else -%}
  {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
  {% set api_server = "https://" + ips[0][0] + ":6443" -%}
{% endif -%}
export APISERVER_URL={{ api_server }}
@@ -48,6 +48,20 @@
|
||||
- makedirs: True
|
||||
{% endif %}
|
||||
|
||||
{% if grains['os_family'] == 'RedHat' %}
|
||||
{% set environment_file = '/etc/sysconfig/kube-addons' %}
|
||||
{% else %}
|
||||
{% set environment_file = '/etc/default/kube-addons' %}
|
||||
{% endif %}
|
||||
|
||||
{{ environment_file }}:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/default
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
/etc/kubernetes/kube-addons.sh:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/kube-addons.sh
|
||||
|
@@ -21,6 +21,9 @@ PIDFILE=/var/run/$NAME.pid
|
||||
SCRIPTNAME=/etc/init.d/$NAME
|
||||
KUBE_ADDONS_SH=/etc/kubernetes/kube-addons.sh
|
||||
|
||||
# Read configuration variable file if it is present
|
||||
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
|
||||
|
||||
# Define LSB log_* functions.
|
||||
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
|
||||
# and status_of_proc is working.
|
||||
|
@@ -3,6 +3,7 @@ Description=Kubernetes Addon Object Manager
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
|
||||
[Service]
|
||||
EnvironmentFile=/etc/sysconfig/kube-addons
|
||||
ExecStart=/etc/kubernetes/kube-addons.sh
|
||||
|
||||
[Install]
|
||||
|
@@ -19,23 +19,47 @@
# managed result is of that. Start everything below that directory.
KUBECTL=/usr/local/bin/kubectl

function create-kubernetesauth-secret() {
  if [ -z "$APISERVER_URL" ] ; then
    echo "Must set APISERVER_URL"
    exit 1
  fi

function create-kubeconfig-secret() {
  local -r token=$1
  local -r username=$2
  local -r safe_username=$(tr -s ':_' '--' <<< "${username}")

  # Make secret with a kubernetes_auth file with a token.
  # Make a kubeconfig file with the token.
  # TODO(etune): put apiserver certs into secret too, and reference from authfile,
  # so that "Insecure" is not needed.
  kafile=$(echo "{\"BearerToken\": \"${token}\", \"Insecure\": true }" | base64 -w0)
  read -r -d '' secretjson <<EOF
  read -r -d '' kubeconfig <<EOF
apiVersion: v1
kind: Config
users:
- name: ${username}
  user:
    token: ${token}
clusters:
- name: local
  cluster:
    server: ${APISERVER_URL}
    insecure-skip-tls-verify: true
contexts:
- context:
    cluster: local
    user: ${username}
  name: service-account-context
current-context: service-account-context
EOF
  local -r kubeconfig_base64=$(echo "${kubeconfig}" | base64 -w0)
  read -r -d '' secretyaml <<EOF
apiVersion: v1beta1
kind: Secret
id: token-${safe_username}
data:
  kubernetes-auth: ${kafile}
  kubeconfig: ${kubeconfig_base64}
EOF
  create-resource-from-string "${secretjson}" 100 10 "Secret-for-token-for-user-${username}" &
  create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" &
  # TODO: label the secrets with special label so kubectl does not show these?
}

@@ -56,7 +80,7 @@ function start_addon() {
# $3 name of this object to use when logging about it.
function create-resource-from-string() {
  local -r config_string=$1;
  local -r tries=$2;
  local tries=$2;
  local -r delay=$3;
  local -r config_name=$1;
  while [ ${tries} -gt 0 ]; do
@@ -86,7 +110,7 @@ while read line; do
  IFS=',' read -a parts <<< "${line}"
  token=${parts[0]}
  username=${parts[1]}
  create-kubernetesauth-secret "${token}" "${username}"
  create-kubeconfig-secret "${token}" "${username}"
done < /srv/kubernetes/known_tokens.csv

for obj in $(find /etc/kubernetes/addons -name \*.yaml); do
@@ -1,58 +0,0 @@
|
||||
{% set daemon_args = "$DAEMON_ARGS" -%}
|
||||
{% if grains['os_family'] == 'RedHat' -%}
|
||||
{% set daemon_args = "" -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set master="--master=127.0.0.1:8080" -%}
|
||||
|
||||
{% set machines = ""-%}
|
||||
{% set cluster_name = "" -%}
|
||||
{% set minion_regexp = "--minion_regexp=.*" -%}
|
||||
{% set sync_nodes = "--sync_nodes=true" -%}
|
||||
|
||||
{% if pillar['node_instance_prefix'] is defined -%}
|
||||
{% set minion_regexp = "--minion_regexp='" + pillar['node_instance_prefix'] + ".*'" -%}
|
||||
{% endif -%}
|
||||
{% if pillar['instance_prefix'] is defined -%}
|
||||
{% set cluster_name = "--cluster_name=" + pillar['instance_prefix'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set cloud_provider = "" -%}
|
||||
{% set cloud_config = "" -%}
|
||||
|
||||
{% if grains.cloud is defined -%}
|
||||
{% set cloud_provider = "--cloud_provider=" + grains.cloud -%}
|
||||
|
||||
{% if grains.cloud == 'gce' -%}
|
||||
{% if grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud_config=" + grains.cloud_config -%}
|
||||
{% endif -%}
|
||||
|
||||
{% elif grains.cloud == 'aws' -%}
|
||||
{% if grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud_config=" + grains.cloud_config -%}
|
||||
{% endif -%}
|
||||
{% set machines = "--machines=" + ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) -%}
|
||||
|
||||
{% elif grains.cloud == 'azure' -%}
|
||||
MACHINES="{{ salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') }}"
|
||||
{% set machines = "--machines=$MACHINES" -%}
|
||||
|
||||
{% elif grains.cloud == 'vsphere' -%}
|
||||
# Collect IPs of minions as machines list.
|
||||
#
|
||||
# Use a bash array to build the value we need. Jinja 2.7 does support a 'map'
|
||||
# filter that would simplify this. However, some installations (specifically
|
||||
# Debian Wheezy) only install Jinja 2.6.
|
||||
MACHINE_IPS=()
|
||||
{% for addrs in salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').values() -%}
|
||||
MACHINE_IPS+=( {{ addrs[0] }} )
|
||||
{% endfor -%}
|
||||
{% set machines = "--machines=$(echo ${MACHINE_IPS[@]} | xargs -n1 echo | paste -sd,)" -%}
|
||||
{% set minion_regexp = "" -%}
|
||||
|
||||
{% endif -%} # grains.cloud switch
|
||||
|
||||
{% endif -%} # grains.cloud is defined
|
||||
|
||||
DAEMON_ARGS="{{daemon_args}} {{master}} {{machines}} {{cluster_name}} {{ minion_regexp }} {{ cloud_provider }} {{ sync_nodes }} {{ cloud_config }} {{pillar['log_level']}}"
|
@@ -1,60 +1,15 @@
|
||||
{% if grains['os_family'] == 'RedHat' %}
|
||||
{% set environment_file = '/etc/sysconfig/kube-controller-manager' %}
|
||||
{% else %}
|
||||
{% set environment_file = '/etc/default/kube-controller-manager' %}
|
||||
{% endif %}
|
||||
|
||||
{{ environment_file }}:
|
||||
/etc/kubernetes/manifests/kube-controller-manager.manifest:
|
||||
file.managed:
|
||||
- source: salt://kube-controller-manager/default
|
||||
- source: salt://kube-controller-manager/kube-controller-manager.manifest
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
- dir_mode: 755
|
||||
|
||||
/usr/local/bin/kube-controller-manager:
|
||||
file.managed:
|
||||
- source: salt://kube-bins/kube-controller-manager
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
{% if grains['os_family'] == 'RedHat' %}
|
||||
|
||||
/usr/lib/systemd/system/kube-controller-manager.service:
|
||||
file.managed:
|
||||
- source: salt://kube-controller-manager/kube-controller-manager.service
|
||||
- user: root
|
||||
- group: root
|
||||
|
||||
{% else %}
|
||||
|
||||
/etc/init.d/kube-controller-manager:
|
||||
file.managed:
|
||||
- source: salt://kube-controller-manager/initd
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
{% endif %}
|
||||
|
||||
kube-controller-manager:
|
||||
group.present:
|
||||
- system: True
|
||||
user.present:
|
||||
- system: True
|
||||
- gid_from_name: True
|
||||
- shell: /sbin/nologin
|
||||
- home: /var/kube-controller-manager
|
||||
- require:
|
||||
- group: kube-controller-manager
|
||||
service.running:
|
||||
- enable: True
|
||||
- watch:
|
||||
- file: /usr/local/bin/kube-controller-manager
|
||||
- file: {{ environment_file }}
|
||||
{% if grains['os_family'] != 'RedHat' %}
|
||||
- file: /etc/init.d/kube-controller-manager
|
||||
{% endif %}
|
||||
|
||||
stop-legacy-kube_controller_manager:
|
||||
service.dead:
|
||||
- name: kube-controller-manager
|
||||
- enable: None
|
||||
|
||||
|
@@ -1,120 +0,0 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides:          kube-controller-manager
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: The Kubernetes controller manager
# Description:
#   The Kubernetes controller manager is responsible for monitoring replication
#   controllers, and creating corresponding pods to achieve the desired state.
### END INIT INFO


# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Kubernetes controller manager"
NAME=kube-controller-manager
DAEMON=/usr/local/bin/kube-controller-manager
DAEMON_ARGS=""
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=kube-controller-manager

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER -- \
        $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --exec $DAEMON
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2) log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) log_end_msg 0 ;;
      2) exit 1 ;;
    esac
    ;;
  status)
    status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
          0) log_end_msg 0 ;;
          1) log_end_msg 1 ;; # Old process is still running
          *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
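For reference, once installed this behaves like any other LSB init script; a typical interaction looks like:
```
sudo service kube-controller-manager start
sudo service kube-controller-manager status
sudo tail -f /var/log/kube-controller-manager.log
```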
@@ -0,0 +1,140 @@
{% set machines = ""-%}
{% set cluster_name = "" -%}
{% set minion_regexp = "--minion_regexp=.*" -%}
{% set sync_nodes = "--sync_nodes=true" -%}

{% if pillar['node_instance_prefix'] is defined -%}
  {% set minion_regexp = "--minion_regexp='" + pillar['node_instance_prefix'] + ".*'" -%}
{% endif -%}
{% if pillar['instance_prefix'] is defined -%}
  {% set cluster_name = "--cluster_name=" + pillar['instance_prefix'] -%}
{% endif -%}

{% set cloud_provider = "" -%}
{% set cloud_config = "" -%}

{% if grains.cloud is defined -%}
{% set cloud_provider = "--cloud_provider=" + grains.cloud -%}

{% if grains.cloud == 'gce' -%}
  {% if grains.cloud_config is defined -%}
    {% set cloud_config = "--cloud_config=" + grains.cloud_config -%}
  {% endif -%}

{% elif grains.cloud == 'aws' -%}
  {% if grains.cloud_config is defined -%}
    {% set cloud_config = "--cloud_config=" + grains.cloud_config -%}
  {% endif -%}
  {% set machines = "--machines=" + ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) -%}

{% elif grains.cloud == 'azure' -%}
  {% set machines = "--machines=" + salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') -%}

{% elif grains.cloud == 'vsphere' -%}
  # Collect IPs of minions as machines list.
  {% set machines= "" -%}
  {% for addrs in salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').values() -%}
    {% if loop.first -%}
      machines="--machines=";
    {% endif -%}
    {% set machines = machines + addrs[0] %}
    {% if not loop.last -%}
      {% set machines = machines + "," %}
    {% endif -%}
  {% endfor -%}
  {% set minion_regexp = "" -%}

{% endif -%}
{% endif -%}

{
"apiVersion": "v1beta3",
"kind": "Pod",
"metadata": {"name":"kube-controller-manager"},
"spec":{
"hostNetwork": true,
"containers":[
    {
    "name": "kube-controller-manager",
    "image": "gcr.io/google_containers/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}",
    "command": [
        "/kube-controller-manager",
        "--master=127.0.0.1:8080",
        "{{machines}}",
        "{{cluster_name}}",
        "{{minion_regexp}}",
        "{{cloud_provider}}",
        "{{sync_nodes}}",
        "{{cloud_config}}",
        "{{pillar['log_level']}}"
    ],
    "volumeMounts": [
        { "name": "srvkube",
          "mountPath": "/srv/kubernetes",
          "readOnly": true},
        { "name": "etcssl",
          "mountPath": "/etc/ssl",
          "readOnly": true},
        { "name": "usrsharessl",
          "mountPath": "/usr/share/ssl",
          "readOnly": true},
        { "name": "varssl",
          "mountPath": "/var/ssl",
          "readOnly": true},
        { "name": "usrssl",
          "mountPath": "/usr/ssl",
          "readOnly": true},
        { "name": "usrlibssl",
          "mountPath": "/usr/lib/ssl",
          "readOnly": true},
        { "name": "usrlocalopenssl",
          "mountPath": "/usr/local/openssl",
          "readOnly": true},
        { "name": "etcopenssl",
          "mountPath": "/etc/openssl",
          "readOnly": true},
        { "name": "etcpkitls",
          "mountPath": "/etc/pki/tls",
          "readOnly": true}
    ]
    }
],
"volumes":[
  { "name": "srvkube",
    "hostPath": {
      "path": "/srv/kubernetes"}
  },
  { "name": "etcssl",
    "hostPath": {
      "path": "/etc/ssl"}
  },
  { "name": "usrsharessl",
    "hostPath": {
      "path": "/usr/share/ssl"}
  },
  { "name": "varssl",
    "hostPath": {
      "path": "/var/ssl"}
  },
  { "name": "usrssl",
    "hostPath": {
      "path": "/usr/ssl"}
  },
  { "name": "usrlibssl",
    "hostPath": {
      "path": "/usr/lib/ssl"}
  },
  { "name": "usrlocalopenssl",
    "hostPath": {
      "path": "/usr/local/openssl"}
  },
  { "name": "etcopenssl",
    "hostPath": {
      "path": "/etc/openssl"}
  },
  { "name": "etcpkitls",
    "hostPath": {
      "path": "/etc/pki/tls"}
  }
]
}}
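Once Salt renders this template to /etc/kubernetes/manifests/kube-controller-manager.manifest (the file.managed state above), the kubelet watching that directory via --config runs it as a static pod. A quick check on the master might look like this (illustrative commands, not part of the diff):
```
sudo cat /etc/kubernetes/manifests/kube-controller-manager.manifest
sudo docker ps | grep kube-controller-manager
```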
@@ -1,11 +0,0 @@
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/etc/sysconfig/kube-controller-manager
ExecStart=/usr/local/bin/kube-controller-manager "$DAEMON_ARGS"
Restart=on-failure

[Install]
WantedBy=multi-user.target
@@ -17,8 +17,8 @@

{% set config = "--config=/etc/kubernetes/manifests" -%}
{% set hostname_override = "" -%}
{% if grains.minion_ip is defined -%}
{% set hostname_override = " --hostname_override=" + grains.minion_ip -%}
{% if grains.hostname_override is defined -%}
  {% set hostname_override = " --hostname_override=" + grains.hostname_override -%}
{% endif -%}

{% set cluster_dns = "" %}
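With these settings the kubelet's argument string ends up carrying both pieces; on a Vagrant minion it would look roughly like the following (illustrative IP, other flags omitted):
```
DAEMON_ARGS="... --config=/etc/kubernetes/manifests --hostname_override=10.245.1.3 ..."
```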
@@ -1,10 +0,0 @@
check process kube-apiserver with pidfile /var/run/kube-apiserver.pid
  group kube-apiserver
  start program = "/etc/init.d/kube-apiserver start"
  stop program = "/etc/init.d/kube-apiserver stop"
  if failed
    host 127.0.0.1
    port 8080
    protocol http
    request "/index.html"
  then restart
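The probe monit performs here can be reproduced by hand when debugging the apiserver, for example:
```
curl -fsS http://127.0.0.1:8080/index.html
```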
@@ -22,6 +22,7 @@ export NUM_MINIONS

# The IP of the master
export MASTER_IP="10.245.1.2"
export KUBE_MASTER_IP="10.245.1.2"

export INSTANCE_PREFIX="kubernetes"
export MASTER_NAME="${INSTANCE_PREFIX}-master"
@@ -72,7 +72,7 @@ grains:
  roles:
    - kubernetes-pool
  cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")'
  minion_ip: '$(echo "$MINION_IP" | sed -e "s/'/''/g")'
  hostname_override: '$(echo "$MINION_IP" | sed -e "s/'/''/g")'
EOF

# we will run provision to update code each time we test, so we do not want to do salt install each time
@@ -18,6 +18,7 @@

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"

function detect-master () {
  KUBE_MASTER_IP=$MASTER_IP
@@ -33,13 +34,62 @@ function detect-minions {
# Verify prereqs on host machine. Also sets exports USING_KUBE_SCRIPTS=true so
# that our Vagrantfile doesn't error out.
function verify-prereqs {
  for x in vagrant VBoxManage; do
  for x in vagrant; do
    if ! which "$x" >/dev/null; then
      echo "Can't find $x in PATH, please fix and retry."
      exit 1
    fi
  done

  local vagrant_plugins=$(vagrant plugin list | sed '-es% .*$%%' '-es% *% %g' | tr ' ' $'\n')
  local providers=(
      # Format is:
      #   provider_ctl_executable vagrant_provider_name vagrant_provider_plugin_re
      # either provider_ctl_executable or vagrant_provider_plugin_re can
      # be blank (i.e., '') if none is needed by Vagrant (see, e.g.,
      # virtualbox entry)
      vmrun vmware_fusion vagrant-vmware-fusion
      vmrun vmware_workstation vagrant-vmware-workstation
      prlctl parallels vagrant-parallels
      VBoxManage virtualbox ''
  )
  local provider_found=''
  local provider_bin
  local provider_name
  local provider_plugin_re

  while [ "${#providers[@]}" -gt 0 ]; do
    provider_bin=${providers[0]}
    provider_name=${providers[1]}
    provider_plugin_re=${providers[2]}
    providers=("${providers[@]:3}")

    # If the provider is explicitly set, look only for that provider
    if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ] \
        && [ "${VAGRANT_DEFAULT_PROVIDER}" != "${provider_name}" ]; then
      continue
    fi

    if ([ -z "${provider_bin}" ] \
          || which "${provider_bin}" >/dev/null 2>&1) \
        && ([ -z "${provider_plugin_re}" ] \
          || [ -n "$(echo "${vagrant_plugins}" | grep -E "^${provider_plugin_re}$")" ]); then
      provider_found="${provider_name}"
      # Stop after finding the first viable provider
      break
    fi
  done

  if [ -z "${provider_found}" ]; then
    if [ -n "${VAGRANT_DEFAULT_PROVIDER}" ]; then
      echo "Can't find the necessary components for the ${VAGRANT_DEFAULT_PROVIDER} vagrant provider, please fix and retry."
    else
      echo "Can't find the necessary components for any viable vagrant providers (e.g., virtualbox), please fix and retry."
    fi

    exit 1
  fi

  # Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no
  # matter what directory the tools are called from.
  export VAGRANT_CWD="${KUBE_ROOT}"
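Because the loop honors VAGRANT_DEFAULT_PROVIDER, a specific provider can be requested explicitly, for example (assuming the matching plugin and control binary are installed):
```
VAGRANT_DEFAULT_PROVIDER=parallels KUBERNETES_PROVIDER=vagrant ./cluster/kube-up.sh
```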
@@ -89,6 +139,7 @@ function create-provision-scripts {
      echo "DNS_REPLICAS='${DNS_REPLICAS:-}'"
      echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
      echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
      echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
      grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
      grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
    ) > "${KUBE_TEMP}/master-start.sh"
@@ -109,6 +160,7 @@ function create-provision-scripts {
      echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
      echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
      echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS-}'"
      echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
      grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
      grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
    ) > "${KUBE_TEMP}/minion-start-${i}.sh"
@@ -116,6 +168,9 @@ function create-provision-scripts {
}

function verify-cluster {
  # TODO: How does the user know the difference between "tak[ing] some
  # time" and "loop[ing] forever"? Can we give more specific feedback on
  # whether "an error" has occurred?
  echo "Each machine instance has been created/updated."
  echo " Now waiting for the Salt provisioning process to complete on each machine."
  echo " This can take some time based on your network, disk, and cpu speed."
@@ -124,7 +179,7 @@ function verify-cluster {
  # verify master has all required daemons
  echo "Validating master"
  local machine="master"
  local -a required_daemon=("salt-master" "salt-minion" "nginx" "kube-controller-manager" "kubelet")
  local -a required_daemon=("salt-master" "salt-minion" "nginx" "kubelet")
  local validated="1"
  until [[ "$validated" == "0" ]]; do
    validated="0"
@@ -198,49 +253,18 @@ function kube-up {

  vagrant up

  local kube_cert=".kubecfg.vagrant.crt"
  local kube_key=".kubecfg.vagrant.key"
  local ca_cert=".kubernetes.vagrant.ca.crt"
  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  export CONTEXT="vagrant"

  (umask 077
   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
   vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
  (
    umask 077
    vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
    vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
    vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null

   cat <<EOF >"${HOME}/.kubernetes_vagrant_auth"
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD",
  "CAFile": "$HOME/$ca_cert",
  "CertFile": "$HOME/$kube_cert",
  "KeyFile": "$HOME/$kube_key"
}
EOF

   cat <<EOF >"${HOME}/.kubernetes_vagrant_kubeconfig"
apiVersion: v1
clusters:
- cluster:
    certificate-authority: ${HOME}/$ca_cert
    server: https://${MASTER_IP}:443
  name: vagrant
contexts:
- context:
    cluster: vagrant
    namespace: default
    user: vagrant
  name: vagrant
current-context: "vagrant"
kind: Config
preferences: {}
users:
- name: vagrant
  user:
    auth-path: ${HOME}/.kubernetes_vagrant_auth
EOF

   chmod 0600 ~/.kubernetes_vagrant_auth "${HOME}/${kube_cert}" \
     "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
    create-kubeconfig
  )

  verify-cluster
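After create-kubeconfig has written the vagrant context, the cluster can be reached through the regular wrapper, for example (typical smoke tests, not part of the diff):
```
./cluster/kubectl.sh get nodes
./cluster/kubectl.sh get cs
```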
@@ -14,11 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# Bring up a Kubernetes cluster.
#
# If the full release name (gs://<bucket>/<release>) is passed in then we take
# that directly. If not then we assume we are doing development stuff and take
# the defaults in the release config.
# Validates that the cluster is healthy.

set -o errexit
set -o nounset
@@ -28,12 +24,9 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"

get-password
detect-master > /dev/null
detect-minions > /dev/null

MINIONS_FILE=/tmp/minions-$$
trap 'rm -rf "${MINIONS_FILE}"' EXIT

# Make several attempts to deal with slow cluster birth.
attempt=0
while true; do
@@ -54,62 +47,39 @@ done
echo "Found ${found} nodes."
cat -n "${MINIONS_FILE}"

# On vSphere, use minion IPs as their names
if [[ "${KUBERNETES_PROVIDER}" == "vsphere" || "${KUBERNETES_PROVIDER}" == "vagrant" || "${KUBERNETES_PROVIDER}" == "libvirt-coreos" || "${KUBERNETES_PROVIDER}" == "juju" ]] ; then
  MINION_NAMES=("${KUBE_MINION_IP_ADDRESSES[@]}")
fi
attempt=0
while true; do
  kubectl_output=$("${KUBE_ROOT}/cluster/kubectl.sh" get cs)

  # On AWS we can't really name the minions, so just trust that if the number is right, the right names are there.
  if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
    MINION_NAMES=("$(cat ${MINIONS_FILE})")
    # /healthz validation isn't working for some reason on AWS. So just hope for the best.
    # TODO: figure out why and fix, it must be working in some form, or else clusters wouldn't work.
    echo "Kubelet health checking on AWS isn't currently supported, assuming everything is good..."
    echo -e "${color_green}Cluster validation succeeded${color_norm}"
    exit 0
  fi
  # The "kubectl componentstatuses" output is four columns like this:
  #
  #     COMPONENT            HEALTH    MSG       ERR
  #     controller-manager   Healthy   ok        nil
  #
  # Parse the output to capture the value of the second column ("HEALTH"), then use grep to
  # count the number of times it doesn't match "Healthy".
  # Because of the header, the actual unsuccessful count is 1 minus the count.

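As an illustration (sample output with invented values), the componentstatuses table looks like the one described above:
```
$ ./cluster/kubectl.sh get cs
COMPONENT            HEALTH    MSG   ERR
controller-manager   Healthy   ok    nil
scheduler            Healthy   ok    nil
etcd-0               Healthy   ok    nil
```
Run through the sed/grep pipeline below, this yields 1: only the header's HEALTH cell fails to match "Healthy", which is the "1 minus the count" case the comment describes, so the cluster is treated as healthy.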
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
  # Grep returns an exit status of 1 when line is not found, so we need the : to always return a 0 exit status
  count=$(grep -c "${MINION_NAMES[$i]}" "${MINIONS_FILE}") || :
  if [[ "${count}" == "0" ]]; then
    echo -e "${color_red}Failed to find ${MINION_NAMES[$i]}, cluster is probably broken.${color_norm}"
    cat -n "${MINIONS_FILE}"
    exit 1
  fi
  non_success_count=$(echo "${kubectl_output}" | \
    sed -n 's/^\([[:alnum:][:punct:]]\+\)\s\+\([[:alnum:][:punct:]]\+\)\s\+.*/\2/p' | \
    grep 'Healthy' --invert-match -c)

  name="${MINION_NAMES[$i]}"
  if [[ "$KUBERNETES_PROVIDER" != "vsphere" && "$KUBERNETES_PROVIDER" != "vagrant" && "$KUBERNETES_PROVIDER" != "libvirt-coreos" && "$KUBERNETES_PROVIDER" != "juju" ]]; then
    # Grab fully qualified name
    name=$(grep "${MINION_NAMES[$i]}\." "${MINIONS_FILE}")
  fi

  # Make sure the kubelet is healthy.
  # Make several attempts to deal with slow cluster birth.
  attempt=0
  while true; do
    echo -n "Attempt $((attempt+1)) at checking Kubelet installation on node ${MINION_NAMES[$i]} ..."
    if [[ "$KUBERNETES_PROVIDER" != "libvirt-coreos" && "$KUBERNETES_PROVIDER" != "juju" ]]; then
      curl_output=$(curl -s --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" \
        "https://${KUBE_MASTER_IP}/api/v1beta1/proxy/minions/${name}/healthz")
    else
      curl_output=$(curl -s \
        "http://${KUBE_MASTER_IP}:8080/api/v1beta1/proxy/minions/${name}/healthz")
    fi
    if [[ "${curl_output}" != "ok" ]]; then
      if (( attempt > 5 )); then
        echo
        echo -e "${color_red}Kubelet failed to install on node ${MINION_NAMES[$i]}. Your cluster is unlikely to work correctly."
        echo -e "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)${color_norm}"
        exit 1
      fi
    else
      echo -e " ${color_green}[working]${color_norm}"
      break
    fi
    echo -e " ${color_yellow}[not working yet]${color_norm}"
  if ((non_success_count > 1)); then
    if ((attempt < 5)); then
      echo -e "${color_yellow}Cluster not working yet.${color_norm}"
      attempt=$((attempt+1))
      sleep 30
  done
    else
      echo -e " ${color_yellow}Validate output:${color_norm}"
      echo "${kubectl_output}"
      echo -e "${color_red}Validation returned one or more failed components. Cluster is probably broken.${color_norm}"
      exit 1
    fi
  else
    break
  fi
done

echo "Validate output:"
echo "${kubectl_output}"
echo -e "${color_green}Cluster validation succeeded${color_norm}"
@@ -37,7 +37,7 @@ echo "master: $KUBE_MASTER" > /etc/salt/minion.d/master.conf
#
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  minion_ip: $(ip route get 1.1.1.1 | awk '{print $7}')
  hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
  roles:
    - kubernetes-pool
    - kubernetes-pool-vsphere
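To see what these grains end up holding, the embedded command can be run by hand; on a host whose default route leaves via eth0 with source address 10.20.30.40 (addresses invented) it looks roughly like this, and the seventh field is the local IP the node will advertise:
```
$ ip route get 1.1.1.1
1.1.1.1 via 10.20.30.1 dev eth0  src 10.20.30.40
$ ip route get 1.1.1.1 | awk '{print $7}'
10.20.30.40
```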
@@ -21,6 +21,7 @@
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vsphere/config-common.sh"
source "${KUBE_ROOT}/cluster/vsphere/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"

# Detect the IP for the master
#
@@ -169,29 +170,17 @@ function upload-server-tars {
}

# Ensure that we have a password created for validating to the master. Will
# read from $HOME/.kubernetes_auth if available.
# read from kubeconfig if available.
#
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
function get-password {
  local file="$HOME/.kubernetes_auth"
  if [[ -r "$file" ]]; then
    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
    return
  get-kubeconfig-basicauth
  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
    KUBE_USER=admin
    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
  fi
  KUBE_USER=admin
  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')

  # Store password for reuse.
  cat << EOF > "$file"
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD"
}
EOF
  chmod 0600 "$file"
}

# Run command over ssh
@@ -372,6 +361,24 @@ function kube-up {
    printf " OK\n"
  done

  echo "Kubernetes cluster created."

  # TODO use token instead of basic auth
  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  export CONTEXT="vsphere_${INSTANCE_PREFIX}"

  (
    umask 077

    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null

    create-kubeconfig
  )

  echo
  echo "Sanity checking cluster..."
@@ -394,33 +401,8 @@ function kube-up {
  echo
  echo " https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ~/.kubernetes_auth."
  echo "The user name and password to use is located in ${KUBECONFIG}"
  echo

  local kube_cert=".kubecfg.crt"
  local kube_key=".kubecfg.key"
  local ca_cert=".kubernetes.ca.crt"

  (
    umask 077

    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null

    cat << EOF > ~/.kubernetes_auth
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD",
  "CAFile": "$HOME/$ca_cert",
  "CertFile": "$HOME/$kube_cert",
  "KeyFile": "$HOME/$kube_key"
}
EOF

    chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
      "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
  )
}

# Delete a kubernetes cluster