From 69ae2fe4bbf0eb6e59476a63afcec5b8cf9a9e1a Mon Sep 17 00:00:00 2001 From: derekwaynecarr Date: Mon, 14 Jul 2014 13:50:04 -0400 Subject: [PATCH] Initial vagrant setup and e2e testing support --- .gitignore | 3 + README.md | 141 ++++++- Vagrantfile | 54 +++ cluster/{ => gce}/config-default.sh | 0 cluster/{ => gce}/config-test.sh | 0 cluster/gce/util.sh | 363 ++++++++++++++++++ cluster/kube-down.sh | 37 +- cluster/kube-env.sh | 21 + cluster/kube-push.sh | 35 +- cluster/kube-up.sh | 150 +------- cluster/kube-util.sh | 63 +++ cluster/kubecfg.sh | 3 +- .../saltbase/salt/apiserver/apiserver.service | 10 + cluster/saltbase/salt/apiserver/default | 34 +- cluster/saltbase/salt/apiserver/init.sls | 31 +- cluster/saltbase/salt/base.sls | 6 +- .../controller-manager.service | 11 + .../saltbase/salt/controller-manager/default | 17 +- .../saltbase/salt/controller-manager/init.sls | 32 +- cluster/saltbase/salt/docker/init.sls | 28 +- cluster/saltbase/salt/etcd/default | 2 + cluster/saltbase/salt/etcd/etcd.service | 11 + cluster/saltbase/salt/etcd/init.sls | 24 ++ cluster/saltbase/salt/kube-proxy/default | 13 +- cluster/saltbase/salt/kube-proxy/init.sls | 31 +- .../salt/kube-proxy/kube-proxy.service | 10 + cluster/saltbase/salt/kubelet/default | 20 +- cluster/saltbase/salt/kubelet/init.sls | 29 +- cluster/saltbase/salt/kubelet/kubelet.service | 10 + cluster/saltbase/salt/nginx/init.sls | 2 + cluster/saltbase/salt/nginx/nginx.conf | 5 + cluster/saltbase/salt/top.sls | 2 +- cluster/util.sh | 105 ----- cluster/vagrant/config-default.sh | 30 ++ cluster/vagrant/config-test.sh | 18 + cluster/vagrant/provision-config.sh | 33 ++ cluster/vagrant/provision-master.sh | 78 ++++ cluster/vagrant/provision-minion.sh | 57 +++ cluster/vagrant/util.sh | 69 ++++ hack/e2e-suite/basic.sh | 4 +- hack/e2e-suite/guestbook.sh | 4 +- hack/e2e-test.sh | 39 +- release/release.sh | 3 +- 43 files changed, 1268 insertions(+), 370 deletions(-) create mode 100644 Vagrantfile rename cluster/{ => 
gce}/config-default.sh (100%) rename cluster/{ => gce}/config-test.sh (100%) create mode 100755 cluster/gce/util.sh create mode 100644 cluster/kube-env.sh create mode 100644 cluster/kube-util.sh create mode 100644 cluster/saltbase/salt/apiserver/apiserver.service create mode 100644 cluster/saltbase/salt/controller-manager/controller-manager.service create mode 100644 cluster/saltbase/salt/etcd/default create mode 100644 cluster/saltbase/salt/etcd/etcd.service create mode 100644 cluster/saltbase/salt/kube-proxy/kube-proxy.service create mode 100644 cluster/saltbase/salt/kubelet/kubelet.service delete mode 100755 cluster/util.sh create mode 100755 cluster/vagrant/config-default.sh create mode 100644 cluster/vagrant/config-test.sh create mode 100755 cluster/vagrant/provision-config.sh create mode 100755 cluster/vagrant/provision-master.sh create mode 100755 cluster/vagrant/provision-minion.sh create mode 100644 cluster/vagrant/util.sh diff --git a/.gitignore b/.gitignore index 4a2583979a1..67d37f32dd8 100755 --- a/.gitignore +++ b/.gitignore @@ -20,5 +20,8 @@ **/.hg **/.hg* +# Vagrant +.vagrant + # Version file we automatically make /pkg/version/autogenerated.go diff --git a/README.md b/README.md index c301efbfb87..20569d435f3 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,8 @@ While the concepts and architecture in Kubernetes represent years of experience ### Contents * [Getting started on Google Compute Engine](#getting-started-on-google-compute-engine) -* [Running a local cluster](#running-locally) +* [Getting started with a Vagrant cluster on your host](#getting-started-with-a-vagrant-cluster-on-your-host) +* [Running a local cluster on your host](#running-locally) * [Running on CoreOS](#running-on-coreos) * [Discussion and Community Support](#community-discussion-and-support) * [Hacking on Kubernetes](#development) @@ -127,6 +128,144 @@ cd kubernetes cluster/kube-down.sh ``` +## Getting started with a Vagrant cluster on your host + +### Prerequisites +1. 
Install latest version >= 1.6.2 of vagrant from http://www.vagrantup.com/downloads.html +2. Install latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads +3. Get the Kubernetes source: + +``` +git clone https://github.com/GoogleCloudPlatform/kubernetes.git +``` + +### Setup + +By default, the Vagrant setup will create a single kubernetes-master and 3 kubernetes-minions. You can control the number of minions that are instantiated via an environment variable on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough minions to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single minion. + +``` +export KUBERNETES_NUM_MINIONS=3 +``` + +To start your local cluster, open a terminal window and run: + +``` +cd kubernetes +vagrant up +``` + +Vagrant will provision each machine in the cluster with all the necessary components to build and run Kubernetes. The initial setup can take a few minutes to complete on each machine. + +By default, each VM in the cluster is running Fedora, and all of the Kubernetes services are installed into systemd. 
+ +To access the master or any minion: + +``` +vagrant ssh master +vagrant ssh minion-1 +vagrant ssh minion-2 +vagrant ssh minion-3 +``` + +To view the service status and/or logs on the kubernetes-master: +``` +vagrant ssh master +[vagrant@kubernetes-master ~] $ sudo systemctl status apiserver +[vagrant@kubernetes-master ~] $ sudo journalctl -r -u apiserver + +[vagrant@kubernetes-master ~] $ sudo systemctl status controller-manager +[vagrant@kubernetes-master ~] $ sudo journalctl -r -u controller-manager + +[vagrant@kubernetes-master ~] $ sudo systemctl status etcd +[vagrant@kubernetes-master ~] $ sudo systemctl status nginx +``` + +To view the services on any of the kubernetes-minion(s): +``` +vagrant ssh minion-1 +[vagrant@kubernetes-minion-1] $ sudo systemctl status docker +[vagrant@kubernetes-minion-1] $ sudo journalctl -r -u docker +[vagrant@kubernetes-minion-1] $ sudo systemctl status kubelet +[vagrant@kubernetes-minion-1] $ sudo journalctl -r -u kubelet +``` + +To push updates to new Kubernetes code after making source changes: +``` +vagrant provision +``` + +To shutdown and then restart the cluster: +``` +vagrant halt +vagrant up +``` + +To destroy the cluster: +``` +vagrant destroy -f +``` + +You can also use the cluster/kube-*.sh scripts to interact with vagrant based providers just like any other hosting platform for kubernetes. + +``` +cd kubernetes +modify cluster/kube-env.sh: + KUBERNETES_PROVIDER="vagrant" + +cluster/kube-up.sh => brings up a vagrant cluster +cluster/kube-down.sh => destroys a vagrant cluster +cluster/kube-push.sh => updates a vagrant cluster +cluster/kubecfg.sh => interact with the cluster +``` + + +### Running a container + +Your cluster is running, and you want to start running containers! + +You can now use any of the cluster/kube-*.sh commands to interact with your VM machines. 
+``` +cluster/kubecfg.sh list /pods +cluster/kubecfg.sh list /services +cluster/kubecfg.sh list /replicationControllers +cluster/kubecfg.sh -p 8080:80 run dockerfile/nginx 3 myNginx + +## begin wait for provision to complete, you can monitor the minions by doing + vagrant ssh minion-1 + sudo docker images + ## you should see it pulling the dockerfile/nginx image, once the above command returns it + sudo docker ps + ## you should see your container running! + exit +## end wait + +## back on the host, introspect kubernetes! +cluster/kubecfg.sh list /pods +cluster/kubecfg.sh list /services +cluster/kubecfg.sh list /replicationControllers +``` + +Congratulations! + +### Testing + +The following will run all of the end-to-end testing scenarios assuming you set your environment in cluster/kube-env.sh + +``` +hack/e2e-test.sh +``` + + +### Troubleshooting + +#### I just created the cluster, but I do not see my container running! + +If this is your first time creating the cluster, the kubelet on each minion schedules a number of docker pull requests to fetch prerequisite images. This can take some time and as a result may delay your initial pod getting provisioned. + +#### I changed Kubernetes code, but its not running! + +Are you sure there was no build error? After running $ vagrant provison , scroll up and ensure that each Salt state was completed successfully on each box in the cluster. Its very likely you see a build error due to an error in your source files! + ## Running locally In a separate tab of your terminal, run: diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 00000000000..51eae9363e1 --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,54 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
+VAGRANTFILE_API_VERSION = "2" + +# Require a recent version of vagrant otherwise some have reported errors setting host names on boxes +Vagrant.require_version ">= 1.6.2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + + # The number of minions to provision + num_minion = (ENV['KUBERNETES_NUM_MINIONS'] || 3).to_i + + # ip configuration + master_ip = "10.245.1.2" + minion_ip_base = "10.245.2." + minion_ips = num_minion.times.collect { |n| minion_ip_base + "#{n+2}" } + minion_ips_str = minion_ips.join(",") + + # Determine the OS platform to use + kube_os = ENV['KUBERNETES_OS'] || "fedora" + + # OS platform to box information + kube_box = { + "fedora" => { + "name" => "fedora20", + "box_url" => "http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-20_chef-provisionerless.box" + } + } + + # Kubernetes master + config.vm.define "master" do |config| + config.vm.box = kube_box[kube_os]["name"] + config.vm.box_url = kube_box[kube_os]["box_url"] + config.vm.provision "shell", inline: "/vagrant/cluster/vagrant/provision-master.sh #{master_ip} #{num_minion} #{minion_ips_str}" + config.vm.network "private_network", ip: "#{master_ip}" + config.vm.hostname = "kubernetes-master" + end + + # Kubernetes minion + num_minion.times do |n| + config.vm.define "minion-#{n+1}" do |minion| + minion_index = n+1 + minion_ip = minion_ips[n] + minion.vm.box = kube_box[kube_os]["name"] + minion.vm.box_url = kube_box[kube_os]["box_url"] + minion.vm.provision "shell", inline: "/vagrant/cluster/vagrant/provision-minion.sh #{master_ip} #{num_minion} #{minion_ips_str} #{minion_ip}" + minion.vm.network "private_network", ip: "#{minion_ip}" + minion.vm.hostname = "kubernetes-minion-#{minion_index}" + end + end + +end diff --git a/cluster/config-default.sh b/cluster/gce/config-default.sh similarity index 100% rename from cluster/config-default.sh rename to cluster/gce/config-default.sh diff --git a/cluster/config-test.sh b/cluster/gce/config-test.sh similarity index 
100% rename from cluster/config-test.sh rename to cluster/gce/config-test.sh diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh new file mode 100755 index 00000000000..b90812c6dcd --- /dev/null +++ b/cluster/gce/util.sh @@ -0,0 +1,363 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions and constant for the local config. + +# Use the config file specified in $KUBE_CONFIG_FILE, or default to +# config-default.sh. +source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"} + +# Find the release to use. If passed in, go with that and validate. If not use +# the release/config.sh version assuming a dev workflow. +function find-release() { + if [ -n "$1" ]; then + RELEASE_NORMALIZED=$1 + else + local RELEASE_CONFIG_SCRIPT=$(dirname $0)/../release/config.sh + if [ -f $(dirname $0)/../release/config.sh ]; then + . $RELEASE_CONFIG_SCRIPT + normalize_release + fi + fi + + # Do one final check that we have a good release + if ! gsutil -q stat $RELEASE_NORMALIZED/master-release.tgz; then + echo "Could not find release tar. If developing, make sure you have run src/release/release.sh to create a release." + exit 1 + fi + echo "Release: ${RELEASE_NORMALIZED}" +} + +# Use the gcloud defaults to find the project. If it is already set in the +# environment then go with that. 
+function detect-project () { + if [ -z "$PROJECT" ]; then + PROJECT=$(gcloud config list project | tail -n 1 | cut -f 3 -d ' ') + fi + + if [ -z "$PROJECT" ]; then + echo "Could not detect Google Cloud Platform project. Set the default project using 'gcloud config set project '" + exit 1 + fi + echo "Project: $PROJECT (autodetected from gcloud config)" +} + +function detect-minions () { + KUBE_MINION_IP_ADDRESSES=() + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + local minion_ip=$(gcutil listinstances --format=csv --sort=external-ip \ + --columns=external-ip --filter="name eq ${MINION_NAMES[$i]}" \ + | tail -n 1) + echo "Found ${MINION_NAMES[$i]} at ${minion_ip}" + KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") + done + if [ -z "$KUBE_MINION_IP_ADDRESSES" ]; then + echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" + exit 1 + fi +} + +function detect-master () { + KUBE_MASTER=${MASTER_NAME} + if [ -z "$KUBE_MASTER_IP" ]; then + KUBE_MASTER_IP=$(gcutil listinstances --format=csv --sort=external-ip \ + --columns=external-ip --filter="name eq ${MASTER_NAME}" \ + | tail -n 1) + fi + if [ -z "$KUBE_MASTER_IP" ]; then + echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" + exit 1 + fi + echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" +} + +function get-password { + file=${HOME}/.kubernetes_auth + if [ -e ${file} ]; then + user=$(cat $file | python -c 'import json,sys;print json.load(sys.stdin)["User"]') + passwd=$(cat $file | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') + return + fi + user=admin + passwd=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') + + # Store password for reuse. 
+ cat << EOF > ~/.kubernetes_auth +{ + "User": "$user", + "Password": "$passwd" +} +EOF + chmod 0600 ~/.kubernetes_auth +} + +# Verify prereqs +function verify-prereqs { + for x in gcloud gcutil gsutil; do + if [ "$(which $x)" == "" ]; then + echo "Can't find $x in PATH, please fix and retry." + exit 1 + fi + done +} + +# Instantiate a kubernetes cluster +function kube-up { + + # Find the release to use. Generally it will be passed when doing a 'prod' + # install and will default to the release/config.sh version when doing a + # developer up. + find-release $1 + + # Detect the project into $PROJECT if it isn't set + detect-project + + # Build up start up script for master + KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX) + trap "rm -rf ${KUBE_TEMP}" EXIT + + get-password + echo "Using password: $user:$passwd" + python $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $user $passwd + HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd) + + ( + echo "#! /bin/bash" + echo "MASTER_NAME=${MASTER_NAME}" + echo "MASTER_RELEASE_TAR=${RELEASE_NORMALIZED}/master-release.tgz" + echo "MASTER_HTPASSWD='${HTPASSWD}'" + grep -v "^#" $(dirname $0)/templates/download-release.sh + grep -v "^#" $(dirname $0)/templates/salt-master.sh + ) > ${KUBE_TEMP}/master-start.sh + + echo "Starting VMs and configuring firewalls" + gcutil addfirewall ${MASTER_NAME}-https \ + --norespect_terminal_width \ + --project ${PROJECT} \ + --network ${NETWORK} \ + --target_tags ${MASTER_TAG} \ + --allowed tcp:443 & + + gcutil addinstance ${MASTER_NAME}\ + --norespect_terminal_width \ + --project ${PROJECT} \ + --zone ${ZONE} \ + --machine_type ${MASTER_SIZE} \ + --image ${IMAGE} \ + --tags ${MASTER_TAG} \ + --network ${NETWORK} \ + --service_account_scopes="storage-ro,compute-rw" \ + --automatic_restart \ + --metadata_from_file startup-script:${KUBE_TEMP}/master-start.sh & + + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + ( + echo "#! 
/bin/bash" + echo "MASTER_NAME=${MASTER_NAME}" + echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}" + grep -v "^#" $(dirname $0)/templates/salt-minion.sh + ) > ${KUBE_TEMP}/minion-start-${i}.sh + + gcutil addinstance ${MINION_NAMES[$i]} \ + --norespect_terminal_width \ + --project ${PROJECT} \ + --zone ${ZONE} \ + --machine_type ${MINION_SIZE} \ + --image ${IMAGE} \ + --tags ${MINION_TAG} \ + --network ${NETWORK} \ + --service_account_scopes=${MINION_SCOPES} \ + --automatic_restart \ + --can_ip_forward \ + --metadata_from_file startup-script:${KUBE_TEMP}/minion-start-${i}.sh & + + gcutil addroute ${MINION_NAMES[$i]} ${MINION_IP_RANGES[$i]} \ + --norespect_terminal_width \ + --project ${PROJECT} \ + --network ${NETWORK} \ + --next_hop_instance ${ZONE}/instances/${MINION_NAMES[$i]} & + done + + FAIL=0 + for job in `jobs -p` + do + wait $job || let "FAIL+=1" + done + if (( $FAIL != 0 )); then + echo "${FAIL} commands failed. Exiting." + exit 2 + fi + + + detect-master > /dev/null + + echo "Waiting for cluster initialization." + echo + echo " This will continually check to see if the API for kubernetes is reachable." + echo " This might loop forever if there was some uncaught error during start" + echo " up." + echo + + until $(curl --insecure --user ${user}:${passwd} --max-time 5 \ + --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do + printf "." + sleep 2 + done + + echo "Kubernetes cluster created." + echo "Sanity checking cluster..." + + sleep 5 + + # Don't bail on errors, we want to be able to print some info. + set +e + + # Basic sanity checking + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + # Make sure docker is installed + gcutil ssh ${MINION_NAMES[$i]} which docker > /dev/null + if [ "$?" != "0" ]; then + echo "Docker failed to install on ${MINION_NAMES[$i]} your cluster is unlikely to work correctly" + echo "Please run ./cluster/kube-down.sh and re-create the cluster. 
(sorry!)" + exit 1 + fi + + # Make sure the kubelet is healthy + if [ "$(curl --insecure --user ${user}:${passwd} https://${KUBE_MASTER_IP}/proxy/minion/${MINION_NAMES[$i]}/healthz)" != "ok" ]; then + echo "Kubelet failed to install on ${MINION_NAMES[$i]} your cluster is unlikely to work correctly" + echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)" + exit 1 + else + echo "Kubelet is successfully installed on ${MINION_NAMES[$i]}" + + fi + + done + + echo + echo "Kubernetes cluster is running. Access the master at:" + echo + echo " https://${user}:${passwd}@${KUBE_MASTER_IP}" + echo + echo "Security note: The server above uses a self signed certificate. This is" + echo " subject to \"Man in the middle\" type attacks." + +} + +# Delete a kubernetes cluster +function kube-down { + # Detect the project into $PROJECT + detect-project + + echo "Bringing down cluster" + gcutil deletefirewall \ + --project ${PROJECT} \ + --norespect_terminal_width \ + --force \ + ${MASTER_NAME}-https & + + gcutil deleteinstance \ + --project ${PROJECT} \ + --norespect_terminal_width \ + --force \ + --delete_boot_pd \ + --zone ${ZONE} \ + ${MASTER_NAME} & + + gcutil deleteinstance \ + --project ${PROJECT} \ + --norespect_terminal_width \ + --force \ + --delete_boot_pd \ + --zone ${ZONE} \ + ${MINION_NAMES[*]} & + + gcutil deleteroute \ + --project ${PROJECT} \ + --force \ + ${MINION_NAMES[*]} & + + wait + +} + +# Update a kubernetes cluster with latest source +function kube-push { + + # Find the release to use. Generally it will be passed when doing a 'prod' + # install and will default to the release/config.sh version when doing a + # developer up. 
+ find-release $1 + + # Detect the project into $PROJECT + detect-master + + ( + echo MASTER_RELEASE_TAR=$RELEASE_NORMALIZED/master-release.tgz + grep -v "^#" $(dirname $0)/templates/download-release.sh + echo "echo Executing configuration" + echo "sudo salt '*' mine.update" + echo "sudo salt --force-color '*' state.highstate" + ) | gcutil ssh --project ${PROJECT} --zone ${ZONE} $KUBE_MASTER bash + + get-password + + echo "Kubernetes cluster is updated. Access the master at:" + echo + echo " https://${user}:${passwd}@${KUBE_MASTER_IP}" + echo + +} + +# Execute prior to running tests to build a release if required for env +function test-build-release { + # Build source + ${KUBE_REPO_ROOT}/hack/build-go.sh + # Make a release + $(dirname $0)/../release/release.sh +} + +# Execute prior to running tests to initialize required structure +function test-setup { + + # Detect the project into $PROJECT if it isn't set + # gce specific + detect-project + + if [[ ${ALREADY_UP} -ne 1 ]]; then + # Open up port 80 & 8080 so common containers on minions can be reached + gcutil addfirewall \ + --norespect_terminal_width \ + --project ${PROJECT} \ + --target_tags ${MINION_TAG} \ + --allowed tcp:80,tcp:8080 \ + --network ${NETWORK} \ + ${MINION_TAG}-http-alt + fi + +} + +# Execute after running tests to perform any required clean-up +function test-teardown { + echo "Shutting down test cluster in background." 
+ gcutil deletefirewall \ + --project ${PROJECT} \ + --norespect_terminal_width \ + --force \ + ${MINION_TAG}-http-alt & + $(dirname $0)/../cluster/kube-down.sh > /dev/null & +} + diff --git a/cluster/kube-down.sh b/cluster/kube-down.sh index 2f6e4be241b..baaa233524e 100755 --- a/cluster/kube-down.sh +++ b/cluster/kube-down.sh @@ -19,37 +19,12 @@ # exit on any error set -e -source $(dirname $0)/util.sh +source $(dirname $0)/kube-env.sh +source $(dirname $0)/$KUBERNETES_PROVIDER/util.sh -# Detect the project into $PROJECT -detect-project +echo "Bringing down cluster using provider: $KUBERNETES_PROVIDER" -echo "Bringing down cluster" -gcutil deletefirewall \ - --project ${PROJECT} \ - --norespect_terminal_width \ - --force \ - ${MASTER_NAME}-https & +verify-prereqs +kube-down -gcutil deleteinstance \ - --project ${PROJECT} \ - --norespect_terminal_width \ - --force \ - --delete_boot_pd \ - --zone ${ZONE} \ - ${MASTER_NAME} & - -gcutil deleteinstance \ - --project ${PROJECT} \ - --norespect_terminal_width \ - --force \ - --delete_boot_pd \ - --zone ${ZONE} \ - ${MINION_NAMES[*]} & - -gcutil deleteroute \ - --project ${PROJECT} \ - --force \ - ${MINION_NAMES[*]} & - -wait +echo "Done" diff --git a/cluster/kube-env.sh b/cluster/kube-env.sh new file mode 100644 index 00000000000..640c07a2a03 --- /dev/null +++ b/cluster/kube-env.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Set provider independent environment variables + +# Set provider of Kubernetes cluster to know where to load provider-specific scripts, values: gce, vagrant, etc. +KUBERNETES_PROVIDER="gce" +#KUBERNETES_PROVIDER="vagrant" diff --git a/cluster/kube-push.sh b/cluster/kube-push.sh index bb8ee34f8d5..33e36f26f1a 100755 --- a/cluster/kube-push.sh +++ b/cluster/kube-push.sh @@ -22,35 +22,12 @@ # exit on any error set -e -source $(dirname $0)/util.sh +source $(dirname $0)/kube-env.sh +source $(dirname $0)/$KUBERNETES_PROVIDER/util.sh -# Make sure that prerequisites are installed. -for x in gcloud gcutil gsutil; do - if [ "$(which $x)" == "" ]; then - echo "Can't find $x in PATH, please fix and retry." - exit 1 - fi -done +echo "Updating cluster using provider: $KUBERNETES_PROVIDER" -# Find the release to use. Generally it will be passed when doing a 'prod' -# install and will default to the release/config.sh version when doing a -# developer up. -find-release $1 +verify-prereqs +kube-up -# Detect the project into $PROJECT -detect-master - -( - echo MASTER_RELEASE_TAR=$RELEASE_NORMALIZED/master-release.tgz - grep -v "^#" $(dirname $0)/templates/download-release.sh - echo "echo Executing configuration" - echo "sudo salt '*' mine.update" - echo "sudo salt --force-color '*' state.highstate" -) | gcutil ssh --project ${PROJECT} --zone ${ZONE} $KUBE_MASTER bash - -get-password - -echo "Kubernetes cluster is updated. Access the master at:" -echo -echo " https://${user}:${passwd}@${KUBE_MASTER_IP}" -echo +echo "Done" diff --git a/cluster/kube-up.sh b/cluster/kube-up.sh index 8e1711a42fe..4de5ed5b8ea 100755 --- a/cluster/kube-up.sh +++ b/cluster/kube-up.sh @@ -23,150 +23,12 @@ # exit on any error set -e -source $(dirname $0)/util.sh +source $(dirname $0)/kube-env.sh +source $(dirname $0)/$KUBERNETES_PROVIDER/util.sh -# Make sure that prerequisites are installed. 
-for x in gcloud gcutil gsutil; do - if [ "$(which $x)" == "" ]; then - echo "Can't find $x in PATH, please fix and retry." - exit 1 - fi -done +echo "Starting cluster using provider: $KUBERNETES_PROVIDER" -# Find the release to use. Generally it will be passed when doing a 'prod' -# install and will default to the release/config.sh version when doing a -# developer up. -find-release $1 +verify-prereqs +kube-up -# Detect the project into $PROJECT if it isn't set -detect-project - -# Build up start up script for master -KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX) -trap "rm -rf ${KUBE_TEMP}" EXIT - -get-password -echo "Using password: $user:$passwd" -python $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $user $passwd -HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd) - -( - echo "#! /bin/bash" - echo "MASTER_NAME=${MASTER_NAME}" - echo "MASTER_RELEASE_TAR=${RELEASE_NORMALIZED}/master-release.tgz" - echo "MASTER_HTPASSWD='${HTPASSWD}'" - grep -v "^#" $(dirname $0)/templates/download-release.sh - grep -v "^#" $(dirname $0)/templates/salt-master.sh -) > ${KUBE_TEMP}/master-start.sh - -echo "Starting VMs and configuring firewalls" -gcutil addfirewall ${MASTER_NAME}-https \ - --norespect_terminal_width \ - --project ${PROJECT} \ - --network ${NETWORK} \ - --target_tags ${MASTER_TAG} \ - --allowed tcp:443 & - -gcutil addinstance ${MASTER_NAME}\ - --norespect_terminal_width \ - --project ${PROJECT} \ - --zone ${ZONE} \ - --machine_type ${MASTER_SIZE} \ - --image ${IMAGE} \ - --tags ${MASTER_TAG} \ - --network ${NETWORK} \ - --service_account_scopes="storage-ro,compute-rw" \ - --automatic_restart \ - --metadata_from_file startup-script:${KUBE_TEMP}/master-start.sh & - -for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - ( - echo "#! 
/bin/bash" - echo "MASTER_NAME=${MASTER_NAME}" - echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}" - grep -v "^#" $(dirname $0)/templates/salt-minion.sh - ) > ${KUBE_TEMP}/minion-start-${i}.sh - - gcutil addinstance ${MINION_NAMES[$i]} \ - --norespect_terminal_width \ - --project ${PROJECT} \ - --zone ${ZONE} \ - --machine_type ${MINION_SIZE} \ - --image ${IMAGE} \ - --tags ${MINION_TAG} \ - --network ${NETWORK} \ - --service_account_scopes=${MINION_SCOPES} \ - --automatic_restart \ - --can_ip_forward \ - --metadata_from_file startup-script:${KUBE_TEMP}/minion-start-${i}.sh & - - gcutil addroute ${MINION_NAMES[$i]} ${MINION_IP_RANGES[$i]} \ - --norespect_terminal_width \ - --project ${PROJECT} \ - --network ${NETWORK} \ - --next_hop_instance ${ZONE}/instances/${MINION_NAMES[$i]} & -done - -FAIL=0 -for job in `jobs -p` -do - wait $job || let "FAIL+=1" -done -if (( $FAIL != 0 )); then - echo "${FAIL} commands failed. Exiting." - exit 2 -fi - - -detect-master > /dev/null - -echo "Waiting for cluster initialization." -echo -echo " This will continually check to see if the API for kubernetes is reachable." -echo " This might loop forever if there was some uncaught error during start" -echo " up." -echo - -until $(curl --insecure --user ${user}:${passwd} --max-time 5 \ - --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do - printf "." - sleep 2 -done - -echo "Kubernetes cluster created." -echo "Sanity checking cluster..." - -sleep 5 - -# Don't bail on errors, we want to be able to print some info. -set +e - -# Basic sanity checking -for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - # Make sure docker is installed - gcutil ssh ${MINION_NAMES[$i]} which docker > /dev/null - if [ "$?" != "0" ]; then - echo "Docker failed to install on ${MINION_NAMES[$i]} your cluster is unlikely to work correctly" - echo "Please run ./cluster/kube-down.sh and re-create the cluster. 
(sorry!)" - exit 1 - fi - - # Make sure the kubelet is healthy - if [ "$(curl --insecure --user ${user}:${passwd} https://${KUBE_MASTER_IP}/proxy/minion/${MINION_NAMES[$i]}/healthz)" != "ok" ]; then - echo "Kubelet failed to install on ${MINION_NAMES[$i]} your cluster is unlikely to work correctly" - echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)" - exit 1 - else - echo "Kubelet is successfully installed on ${MINION_NAMES[$i]}" - - fi - -done - -echo -echo "Kubernetes cluster is running. Access the master at:" -echo -echo " https://${user}:${passwd}@${KUBE_MASTER_IP}" -echo -echo "Security note: The server above uses a self signed certificate. This is" -echo " subject to \"Man in the middle\" type attacks." +echo "Done" diff --git a/cluster/kube-util.sh b/cluster/kube-util.sh new file mode 100644 index 00000000000..deb38b39ff9 --- /dev/null +++ b/cluster/kube-util.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts. 
+ +# Must ensure that the following ENV vars are set +function detect-master { + echo "KUBE_MASTER_IP: $KUBE_MASTER_IP" + echo "KUBE_MASTER: $KUBE_MASTER" +} + +# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[] +function detect-minions { + echo "KUBE_MINION_IP_ADDRESSES=[]" +} + +# Verify prereqs on host machine +function verify-prereqs { + echo "TODO" +} + +# Instantiate a kubernetes cluster +function kube-up { + echo "TODO" +} + +# Delete a kubernetes cluster +function kube-down { + echo "TODO" +} + +# Update a kubernetes cluster with latest source +function kube-push { + echo "TODO" +} + +# Execute prior to running tests to build a release if required for env +function test-build-release { + echo "TODO" +} + +# Execute prior to running tests to initialize required structure +function test-setup { + echo "TODO" +} + +# Execute after running tests to perform any required clean-up +function test-teardown { + echo "TODO" +} diff --git a/cluster/kubecfg.sh b/cluster/kubecfg.sh index 798c22e3976..fb5efd6f87a 100755 --- a/cluster/kubecfg.sh +++ b/cluster/kubecfg.sh @@ -16,7 +16,8 @@ #!/bin/bash -source $(dirname $0)/util.sh +source $(dirname $0)/kube-env.sh +source $(dirname $0)/$KUBERNETES_PROVIDER/util.sh CLOUDCFG=$(dirname $0)/../output/go/kubecfg if [ ! 
-x $CLOUDCFG ]; then diff --git a/cluster/saltbase/salt/apiserver/apiserver.service b/cluster/saltbase/salt/apiserver/apiserver.service new file mode 100644 index 00000000000..a2f439bf16c --- /dev/null +++ b/cluster/saltbase/salt/apiserver/apiserver.service @@ -0,0 +1,10 @@ +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=/etc/sysconfig/apiserver +ExecStart=/usr/local/bin/apiserver "$DAEMON_ARGS" + +[Install] +WantedBy=multi-user.target diff --git a/cluster/saltbase/salt/apiserver/default b/cluster/saltbase/salt/apiserver/default index 853571ef7fc..5909f097b0a 100644 --- a/cluster/saltbase/salt/apiserver/default +++ b/cluster/saltbase/salt/apiserver/default @@ -1,11 +1,27 @@ -{%- set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %} -DAEMON_ARGS="$DAEMON_ARGS -etcd_servers=http://{{ ips[0][0] }}:4001" - -{% if grains['cloud'] is defined and grains['cloud'] == 'gce' %} -DAEMON_ARGS="$DAEMON_ARGS -cloud_provider=gce -minion_regexp='{{ pillar['instance_prefix'] }}.*'" -MACHINES="{{ ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) }}" -{% elif grains['cloud'] is defined and grains['cloud'] == 'azure' %} -MACHINES="{{ salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') }}" +{% set daemon_args = "$DAEMON_ARGS" %} +{% if grains['os_family'] == 'RedHat' %} + {% set daemon_args = "" %} {% endif %} -DAEMON_ARGS="$DAEMON_ARGS -machines=$MACHINES" +{% set cloud_provider = "" %} +{% set minion_regexp = "-minion_regexp '" + pillar['instance_prefix'] + ".*'" %} +{% set address = "-address 127.0.0.1" %} + +{% if grains.etcd_servers is defined %} + {% set etcd_servers = "-etcd_servers=http://" + grains.etcd_servers + ":4001" %} + {% set address = "-address=" + grains.etcd_servers %} +{% else %} + {% set ips = 
salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %} + {% set etcd_servers = "-etcd_servers=http://" + ips[0][0] + ":4001" %} +{% endif %} +{% if grains.minion_ips is defined %} + {% set machines = "-machines " + grains.minion_ips %} +{% elif grains.cloud is defined and grains.cloud == 'gce' %} + {% set cloud_provider = "-cloud_provider=gce" %} + {% set machines = "-machines " + ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) %} +{% elif grains.cloud is defined and grains.cloud == 'azure' %} + MACHINES="{{ salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') }}" + {% set machines = "-machines $MACHINES" %} +{% endif %} + +DAEMON_ARGS="{{daemon_args}} {{address}} {{machines}} {{etcd_servers}} {{ minion_regexp }} {{ cloud_provider }}" diff --git a/cluster/saltbase/salt/apiserver/init.sls b/cluster/saltbase/salt/apiserver/init.sls index 2ee680e2c6f..3df3b4271ba 100644 --- a/cluster/saltbase/salt/apiserver/init.sls +++ b/cluster/saltbase/salt/apiserver/init.sls @@ -1,12 +1,21 @@ {% set root = '/var/src/apiserver' %} {% set package = 'github.com/GoogleCloudPlatform/kubernetes' %} {% set package_dir = root + '/src/' + package %} +{% if grains['os_family'] == 'RedHat' %} +{% set environment_file = '/etc/sysconfig/apiserver' %} +{% else %} +{% set environment_file = '/etc/default/apiserver' %} +{% endif %} {{ package_dir }}: file.recurse: - source: salt://apiserver/go - user: root + {% if grains['os_family'] == 'RedHat' %} + - group: root + {% else %} - group: staff + {% endif %} - dir_mode: 775 - file_mode: 664 - makedirs: True @@ -20,7 +29,11 @@ apiserver-third-party-go: - name: {{ root }}/src - source: salt://third-party/go/src - user: root + {% if grains['os_family'] == 'RedHat' %} + - group: root + {% else %} - group: staff + {% endif %} - dir_mode: 775 - file_mode: 664 - makedirs: True @@ -29,7 +42,7 @@ 
apiserver-third-party-go: - group - mode -/etc/default/apiserver: +{{ environment_file }}: file.managed: - source: salt://apiserver/default - template: jinja @@ -54,6 +67,16 @@ apiserver-build: - watch: - cmd: apiserver-build +{% if grains['os_family'] == 'RedHat' %} + +/usr/lib/systemd/system/apiserver.service: + file.managed: + - source: salt://apiserver/apiserver.service + - user: root + - group: root + +{% else %} + /etc/init.d/apiserver: file.managed: - source: salt://apiserver/initd @@ -61,6 +84,8 @@ apiserver-build: - group: root - mode: 755 +{% endif %} + apiserver: group.present: - system: True @@ -75,6 +100,8 @@ apiserver: - enable: True - watch: - cmd: apiserver-build - - file: /etc/default/apiserver + - file: {{ environment_file }} - file: /usr/local/bin/apiserver +{% if grains['os_family'] != 'RedHat' %} - file: /etc/init.d/apiserver +{% endif %} diff --git a/cluster/saltbase/salt/base.sls b/cluster/saltbase/salt/base.sls index 56b2714783b..5954b84243f 100755 --- a/cluster/saltbase/salt/base.sls +++ b/cluster/saltbase/salt/base.sls @@ -1,6 +1,10 @@ - pkg-core: pkg.latest: - names: +{% if grains['os_family'] == 'RedHat' %} + - python + - git +{% else %} - apt-transport-https - python-apt +{% endif %} \ No newline at end of file diff --git a/cluster/saltbase/salt/controller-manager/controller-manager.service b/cluster/saltbase/salt/controller-manager/controller-manager.service new file mode 100644 index 00000000000..894e9eb150e --- /dev/null +++ b/cluster/saltbase/salt/controller-manager/controller-manager.service @@ -0,0 +1,11 @@ +[Unit] +Description=Kubernetes Controller Manager +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +Type=simple +EnvironmentFile=-/etc/sysconfig/controller-manager +ExecStart=/usr/local/bin/controller-manager "$DAEMON_ARGS" + +[Install] +WantedBy=multi-user.target diff --git a/cluster/saltbase/salt/controller-manager/default b/cluster/saltbase/salt/controller-manager/default index 
5a9726d5511..a9e9dcd2544 100644 --- a/cluster/saltbase/salt/controller-manager/default +++ b/cluster/saltbase/salt/controller-manager/default @@ -1,2 +1,15 @@ -{%- set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %} -DAEMON_ARGS="$DAEMON_ARGS -etcd_servers=http://{{ ips[0][0] }}:4001" +{% set daemon_args = "$DAEMON_ARGS" %} +{% if grains['os_family'] == 'RedHat' %} + {% set daemon_args = "" %} +{% endif %} +{% set master="-master=127.0.0.1:8080" %} +{% if grains.master_ip is defined %} + {% set master="-master=" + grains.master_ip + ":8080" %} +{% endif %} +{% if grains.etcd_servers is defined %} + {% set etcd_servers = "-etcd_servers=http://" + grains.etcd_servers + ":4001" %} +{% else %} + {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %} + {% set etcd_servers = "-etcd_servers=http://" + ips[0][0] + ":4001" %} +{% endif %} +DAEMON_ARGS="{{daemon_args}} {{master}} {{etcd_servers}}" \ No newline at end of file diff --git a/cluster/saltbase/salt/controller-manager/init.sls b/cluster/saltbase/salt/controller-manager/init.sls index 9ae54debbbe..d38d5598e85 100644 --- a/cluster/saltbase/salt/controller-manager/init.sls +++ b/cluster/saltbase/salt/controller-manager/init.sls @@ -1,12 +1,21 @@ {% set root = '/var/src/controller-manager' %} {% set package = 'github.com/GoogleCloudPlatform/kubernetes' %} {% set package_dir = root + '/src/' + package %} +{% if grains['os_family'] == 'RedHat' %} +{% set environment_file = '/etc/sysconfig/controller-manager' %} +{% else %} +{% set environment_file = '/etc/default/controller-manager' %} +{% endif %} {{ package_dir }}: file.recurse: - source: salt://controller-manager/go - user: root + {% if grains['os_family'] == 'RedHat' %} + - group: root + {% else %} - group: staff + {% endif %} - dir_mode: 775 - file_mode: 664 - makedirs: True @@ -20,7 +29,11 @@ controller-manager-third-party-go: - name: {{ root }}/src - source: 
salt://third-party/go/src - user: root + {% if grains['os_family'] == 'RedHat' %} + - group: root + {% else %} - group: staff + {% endif %} - dir_mode: 775 - file_mode: 664 - makedirs: True @@ -29,7 +42,7 @@ controller-manager-third-party-go: - group - mode -/etc/default/controller-manager: +{{ environment_file }}: file.managed: - source: salt://controller-manager/default - template: jinja @@ -54,6 +67,16 @@ controller-manager-build: - watch: - cmd: controller-manager-build +{% if grains['os_family'] == 'RedHat' %} + +/usr/lib/systemd/system/controller-manager.service: + file.managed: + - source: salt://controller-manager/controller-manager.service + - user: root + - group: root + +{% else %} + /etc/init.d/controller-manager: file.managed: - source: salt://controller-manager/initd @@ -61,6 +84,8 @@ controller-manager-build: - group: root - mode: 755 +{% endif %} + controller-manager: group.present: - system: True @@ -76,6 +101,9 @@ controller-manager: - watch: - cmd: controller-manager-build - file: /usr/local/bin/controller-manager + - file: {{ environment_file }} +{% if grains['os_family'] != 'RedHat' %} - file: /etc/init.d/controller-manager - - file: /etc/default/controller-manager +{% endif %} + diff --git a/cluster/saltbase/salt/docker/init.sls b/cluster/saltbase/salt/docker/init.sls index 0ba9b8b0023..6e06584cbf8 100755 --- a/cluster/saltbase/salt/docker/init.sls +++ b/cluster/saltbase/salt/docker/init.sls @@ -1,3 +1,11 @@ +{% if grains['os_family'] == 'RedHat' %} +{% set environment_file = '/etc/sysconfig/docker' %} +{% else %} +{% set environment_file = '/etc/default/docker' %} +{% endif %} + +{% if grains['os_family'] != 'RedHat' %} + docker-repo: pkgrepo.managed: - humanname: Docker Repo @@ -25,7 +33,23 @@ cbr0: - cidr: {{ grains['cbr-cidr'] }} - mtu: 1460 -/etc/default/docker: +{% endif %} + +{% if grains['os_family'] == 'RedHat' %} + +docker-io: + pkg: + - installed + +docker: + service.running: + - enable: True + - require: + - pkg: docker-io + +{% 
else %} + +{{ environment_file }}: file.managed: - source: salt://docker/docker-defaults - template: jinja @@ -51,3 +75,5 @@ lxc-docker: # - pkg: lxc-docker # - watch: # - file: /etc/default/docker + +{% endif %} diff --git a/cluster/saltbase/salt/etcd/default b/cluster/saltbase/salt/etcd/default new file mode 100644 index 00000000000..f44190cdfd6 --- /dev/null +++ b/cluster/saltbase/salt/etcd/default @@ -0,0 +1,2 @@ +{% set hostname = grains.host %} +DAEMON_ARGS="-peer-addr {{hostname}}:7001 -name {{hostname}}" diff --git a/cluster/saltbase/salt/etcd/etcd.service b/cluster/saltbase/salt/etcd/etcd.service new file mode 100644 index 00000000000..a33f0142afc --- /dev/null +++ b/cluster/saltbase/salt/etcd/etcd.service @@ -0,0 +1,11 @@ +[Unit] +Description=etcd +Documentation=https://github.com/coreos/etcd + +[Service] +Type=simple +EnvironmentFile=/etc/default/etcd +ExecStart=/usr/local/bin/etcd $DAEMON_ARGS + +[Install] +WantedBy=multi-user.target diff --git a/cluster/saltbase/salt/etcd/init.sls b/cluster/saltbase/salt/etcd/init.sls index 50911ed58cc..4e8f33e79df 100755 --- a/cluster/saltbase/salt/etcd/init.sls +++ b/cluster/saltbase/salt/etcd/init.sls @@ -46,6 +46,24 @@ etcd: - group: etcd - dir_mode: 700 +{% if grains['os_family'] == 'RedHat' %} + +/etc/default/etcd: + file.managed: + - source: salt://etcd/default + - template: jinja + - user: root + - group: root + - mode: 644 + +/usr/lib/systemd/system/etcd.service: + file.managed: + - source: salt://etcd/etcd.service + - user: root + - group: root + +{% else %} + /etc/init.d/etcd: file.managed: - source: salt://etcd/initd @@ -53,11 +71,17 @@ etcd: - group: root - mode: 755 +{% endif %} + etcd-service: service.running: - name: etcd - enable: True - watch: - file: /etc/etcd/etcd.conf + {% if grains['os_family'] == 'RedHat' %} + - file: /usr/lib/systemd/system/etcd.service + - file: /etc/default/etcd + {% endif %} - cmd: etcd-install diff --git a/cluster/saltbase/salt/kube-proxy/default 
b/cluster/saltbase/salt/kube-proxy/default index c24d3b01260..45e7a44f1a7 100644 --- a/cluster/saltbase/salt/kube-proxy/default +++ b/cluster/saltbase/salt/kube-proxy/default @@ -1,2 +1,11 @@ -{%- set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %} -DAEMON_ARGS="$DAEMON_ARGS --etcd_servers=http://{{ ips[0][0] }}:4001" +{% set daemon_args = "$DAEMON_ARGS" %} +{% if grains['os_family'] == 'RedHat' %} + {% set daemon_args = "" %} +{% endif %} +{% if grains.etcd_servers is defined %} + {% set etcd_servers = "-etcd_servers=http://" + grains.etcd_servers + ":4001" %} +{% else %} + {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %} + {% set etcd_servers = "-etcd_servers=http://" + ips[0][0] + ":4001" %} +{% endif %} +DAEMON_ARGS="{{daemon_args}} {{etcd_servers}}" diff --git a/cluster/saltbase/salt/kube-proxy/init.sls b/cluster/saltbase/salt/kube-proxy/init.sls index f68a6a30f6b..228e32c099c 100644 --- a/cluster/saltbase/salt/kube-proxy/init.sls +++ b/cluster/saltbase/salt/kube-proxy/init.sls @@ -1,12 +1,21 @@ {% set root = '/var/src/kube-proxy' %} {% set package = 'github.com/GoogleCloudPlatform/kubernetes' %} {% set package_dir = root + '/src/' + package %} +{% if grains['os_family'] == 'RedHat' %} +{% set environment_file = '/etc/sysconfig/kube-proxy' %} +{% else %} +{% set environment_file = '/etc/default/kube-proxy' %} +{% endif %} {{ package_dir }}: file.recurse: - source: salt://kube-proxy/go - user: root + {% if grains['os_family'] == 'RedHat' %} + - group: root + {% else %} - group: staff + {% endif %} - dir_mode: 775 - file_mode: 664 - makedirs: True @@ -20,7 +29,11 @@ third-party-go: - name: {{ root }}/src - source: salt://third-party/go/src - user: root + {% if grains['os_family'] == 'RedHat' %} + - group: root + {% else %} - group: staff + {% endif %} - dir_mode: 775 - file_mode: 664 - makedirs: True @@ -46,6 +59,16 @@ kube-proxy-build: - watch: - cmd: kube-proxy-build 
+{% if grains['os_family'] == 'RedHat' %} + +/usr/lib/systemd/system/kube-proxy.service: + file.managed: + - source: salt://kube-proxy/kube-proxy.service + - user: root + - group: root + +{% else %} + /etc/init.d/kube-proxy: file.managed: - source: salt://kube-proxy/initd @@ -53,7 +76,9 @@ kube-proxy-build: - group: root - mode: 755 -/etc/default/kube-proxy: +{% endif %} + +{{ environment_file }}: file.managed: - source: salt://kube-proxy/default - template: jinja @@ -75,5 +100,7 @@ kube-proxy: - enable: True - watch: - cmd: kube-proxy-build - - file: /etc/default/kube-proxy + - file: {{ environment_file }} +{% if grains['os_family'] != 'RedHat' %} - file: /etc/init.d/kube-proxy +{% endif %} diff --git a/cluster/saltbase/salt/kube-proxy/kube-proxy.service b/cluster/saltbase/salt/kube-proxy/kube-proxy.service new file mode 100644 index 00000000000..70843605444 --- /dev/null +++ b/cluster/saltbase/salt/kube-proxy/kube-proxy.service @@ -0,0 +1,10 @@ +[Unit] +Description=Kubernetes Kube-Proxy Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=/etc/sysconfig/kube-proxy +ExecStart=/usr/local/bin/kube-proxy "$DAEMON_ARGS" + +[Install] +WantedBy=multi-user.target diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default index c6f547cf4ea..9e5769c6905 100644 --- a/cluster/saltbase/salt/kubelet/default +++ b/cluster/saltbase/salt/kubelet/default @@ -1,2 +1,18 @@ -{%- set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %} -DAEMON_ARGS="$DAEMON_ARGS -etcd_servers=http://{{ ips[0][0] }}:4001 -address=$HOSTNAME -config=/etc/kubernetes/manifests" +{% set daemon_args = "$DAEMON_ARGS" %} +{% if grains['os_family'] == 'RedHat' %} + {% set daemon_args = "" %} +{% endif %} +{% if grains.etcd_servers is defined %} + {% set etcd_servers = "-etcd_servers=http://" + grains.etcd_servers + ":4001" %} +{% else %} + {% set ips = 
salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %} + {% set etcd_servers = "-etcd_servers=http://" + ips[0][0] + ":4001" %} +{% endif %} + +{% if grains.minion_ip is defined %} + {% set address = "-address=" + grains.minion_ip + " -hostname_override=" + grains.minion_ip %} +{% else %} + {% set address = "-address=$HOSTNAME" %} +{% endif %} + +DAEMON_ARGS="{{daemon_args}} {{etcd_servers}} {{address}} -config=/etc/kubernetes/manifests" diff --git a/cluster/saltbase/salt/kubelet/init.sls b/cluster/saltbase/salt/kubelet/init.sls index f08a73e99b5..a1ffd09f4ba 100644 --- a/cluster/saltbase/salt/kubelet/init.sls +++ b/cluster/saltbase/salt/kubelet/init.sls @@ -1,12 +1,21 @@ {% set root = '/var/src/kubelet' %} {% set package = 'github.com/GoogleCloudPlatform/kubernetes' %} {% set package_dir = root + '/src/' + package %} +{% if grains['os_family'] == 'RedHat' %} +{% set environment_file = '/etc/sysconfig/kubelet' %} +{% else %} +{% set environment_file = '/etc/default/kubelet' %} +{% endif %} {{ package_dir }}: file.recurse: - source: salt://kubelet/go - user: root + {% if grains['os_family'] == 'RedHat' %} + - group: root + {% else %} - group: staff + {% endif %} - dir_mode: 775 - file_mode: 664 - makedirs: True @@ -20,7 +29,11 @@ kubelet-third-party-go: - name: {{ root }}/src - source: salt://third-party/go/src - user: root + {% if grains['os_family'] == 'RedHat' %} + - group: root + {% else %} - group: staff + {% endif %} - dir_mode: 775 - file_mode: 664 - makedirs: True @@ -29,7 +42,7 @@ kubelet-third-party-go: - group - mode -/etc/default/kubelet: +{{ environment_file}}: file.managed: - source: salt://kubelet/default - template: jinja @@ -54,6 +67,16 @@ kubelet-build: - watch: - cmd: kubelet-build +{% if grains['os_family'] == 'RedHat' %} + +/usr/lib/systemd/system/kubelet.service: + file.managed: + - source: salt://kubelet/kubelet.service + - user: root + - group: root + +{% else %} + /etc/init.d/kubelet: file.managed: - source: 
salt://kubelet/initd @@ -61,6 +84,8 @@ kubelet-build: - group: root - mode: 755 +{% endif %} + kubelet: group.present: - system: True @@ -78,5 +103,7 @@ kubelet: - watch: - cmd: kubelet-build - file: /usr/local/bin/kubelet +{% if grains['os_family'] != 'RedHat' %} - file: /etc/init.d/kubelet +{% endif %} diff --git a/cluster/saltbase/salt/kubelet/kubelet.service b/cluster/saltbase/salt/kubelet/kubelet.service new file mode 100644 index 00000000000..fef69a8034b --- /dev/null +++ b/cluster/saltbase/salt/kubelet/kubelet.service @@ -0,0 +1,10 @@ +[Unit] +Description=Kubernetes Kubelet Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=/etc/sysconfig/kubelet +ExecStart=/usr/local/bin/kubelet "$DAEMON_ARGS" + +[Install] +WantedBy=multi-user.target diff --git a/cluster/saltbase/salt/nginx/init.sls b/cluster/saltbase/salt/nginx/init.sls index a914140bc90..912e5b52785 100644 --- a/cluster/saltbase/salt/nginx/init.sls +++ b/cluster/saltbase/salt/nginx/init.sls @@ -23,6 +23,7 @@ nginx: file: - managed - source: salt://nginx/nginx.conf + - template: jinja - user: root - group: root - mode: 644 @@ -30,6 +31,7 @@ nginx: /etc/nginx/sites-enabled/default: file: - managed + - makedirs: true - source: salt://nginx/kubernetes-site - user: root - group: root diff --git a/cluster/saltbase/salt/nginx/nginx.conf b/cluster/saltbase/salt/nginx/nginx.conf index 2523548cea4..00b1961ab61 100644 --- a/cluster/saltbase/salt/nginx/nginx.conf +++ b/cluster/saltbase/salt/nginx/nginx.conf @@ -1,4 +1,9 @@ +{% if grains['os_family'] == 'RedHat' %} +user nginx; +{% else %} user www-data; +{% endif %} + worker_processes 4; pid /var/run/nginx.pid; diff --git a/cluster/saltbase/salt/top.sls b/cluster/saltbase/salt/top.sls index 432e52d44df..4a083c3678f 100755 --- a/cluster/saltbase/salt/top.sls +++ b/cluster/saltbase/salt/top.sls @@ -13,7 +13,7 @@ base: 'roles:kubernetes-master': - match: grain - golang + - etcd - apiserver - controller-manager - - etcd 
- nginx diff --git a/cluster/util.sh b/cluster/util.sh deleted file mode 100755 index 1189292257d..00000000000 --- a/cluster/util.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions and constant for the local config. - -# Use the config file specified in $KUBE_CONFIG_FILE, or default to -# config-default.sh. -source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"} - -# Find the release to use. If passed in, go with that and validate. If not use -# the release/config.sh version assuming a dev workflow. -function find-release() { - if [ -n "$1" ]; then - RELEASE_NORMALIZED=$1 - else - local RELEASE_CONFIG_SCRIPT=$(dirname $0)/../release/config.sh - if [ -f $(dirname $0)/../release/config.sh ]; then - . $RELEASE_CONFIG_SCRIPT - normalize_release - fi - fi - - # Do one final check that we have a good release - if ! gsutil -q stat $RELEASE_NORMALIZED/master-release.tgz; then - echo "Could not find release tar. If developing, make sure you have run src/release/release.sh to create a release." - exit 1 - fi - echo "Release: ${RELEASE_NORMALIZED}" -} - -# Use the gcloud defaults to find the project. If it is already set in the -# environment then go with that. 
-function detect-project () { - if [ -z "$PROJECT" ]; then - PROJECT=$(gcloud config list project | tail -n 1 | cut -f 3 -d ' ') - fi - - if [ -z "$PROJECT" ]; then - echo "Could not detect Google Cloud Platform project. Set the default project using 'gcloud config set project '" - exit 1 - fi - echo "Project: $PROJECT (autodetected from gcloud config)" -} - -function detect-minions () { - KUBE_MINION_IP_ADDRESSES=() - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - local minion_ip=$(gcutil listinstances --format=csv --sort=external-ip \ - --columns=external-ip --filter="name eq ${MINION_NAMES[$i]}" \ - | tail -n 1) - echo "Found ${MINION_NAMES[$i]} at ${minion_ip}" - KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") - done - if [ -z "$KUBE_MINION_IP_ADDRESSES" ]; then - echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" - exit 1 - fi -} - -function detect-master () { - KUBE_MASTER=${MASTER_NAME} - if [ -z "$KUBE_MASTER_IP" ]; then - KUBE_MASTER_IP=$(gcutil listinstances --format=csv --sort=external-ip \ - --columns=external-ip --filter="name eq ${MASTER_NAME}" \ - | tail -n 1) - fi - if [ -z "$KUBE_MASTER_IP" ]; then - echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" - exit 1 - fi - echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" -} - -function get-password { - file=${HOME}/.kubernetes_auth - if [ -e ${file} ]; then - user=$(cat $file | python -c 'import json,sys;print json.load(sys.stdin)["User"]') - passwd=$(cat $file | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') - return - fi - user=admin - passwd=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') - - # Store password for reuse. 
- cat << EOF > ~/.kubernetes_auth -{ - "User": "$user", - "Password": "$passwd" -} -EOF - chmod 0600 ~/.kubernetes_auth -} diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh new file mode 100755 index 00000000000..d12b4c9d212 --- /dev/null +++ b/cluster/vagrant/config-default.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +## Contains configuration values for interacting with the Vagrant cluster + +# NUMBER OF MINIONS IN THE CLUSTER +NUM_MINIONS=${KUBERNETES_NUM_MINIONS-"3"} + +# IP LOCATIONS FOR INTERACTING WITH THE MASTER +export KUBE_MASTER_IP="10.245.1.2" +export KUBERNETES_MASTER="http://10.245.1.2:8080" + +# IP LOCATIONS FOR INTERACTING WITH THE MINIONS +MINION_IP_BASE="10.245.2." +for (( i=0; i <${NUM_MINIONS}; i++)) do + KUBE_MINION_IP_ADDRESSES[$i]="${MINION_IP_BASE}$[$i+2]" +done \ No newline at end of file diff --git a/cluster/vagrant/config-test.sh b/cluster/vagrant/config-test.sh new file mode 100644 index 00000000000..7a636f9d6a2 --- /dev/null +++ b/cluster/vagrant/config-test.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +## Contains configuration values for interacting with the Vagrant cluster in test mode +source $(dirname ${BASH_SOURCE})/config-default.sh diff --git a/cluster/vagrant/provision-config.sh b/cluster/vagrant/provision-config.sh new file mode 100755 index 00000000000..ae209a97774 --- /dev/null +++ b/cluster/vagrant/provision-config.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Passed as arguments to provisioning from Vagrantfile +MASTER_IP=$1 +NUM_MINIONS=$2 +MINION_IPS=$3 + +INSTANCE_PREFIX=kubernetes +MASTER_NAME="${INSTANCE_PREFIX}-master" +MASTER_TAG="${INSTANCE_PREFIX}-master" +MINION_TAG="${INSTANCE_PREFIX}-minion" +MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) +MINION_IP_RANGES=($(eval echo "10.245.{2..${NUM_MINIONS}}.2/24")) +MINION_SCOPES="" + +# simplified setup for local vagrant 2 node cluster +MASTER_HTPASSWD=passw0rd + + diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh new file mode 100755 index 00000000000..db6052955ec --- /dev/null +++ b/cluster/vagrant/provision-master.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# exit on any error +set -e +source $(dirname $0)/provision-config.sh + +# we will run provision to update code each time we test, so we do not want to do salt install each time +if [ ! 
-f "/var/kube-vagrant-setup" ]; then + mkdir -p /etc/salt/minion.d + echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf + + cat <<EOF >/etc/salt/minion.d/grains.conf +grains: + master_ip: $MASTER_IP + etcd_servers: $MASTER_IP + minion_ips: $MINION_IPS + roles: + - kubernetes-master +EOF + + # Configure the salt-master + # Auto accept all keys from minions that try to join + mkdir -p /etc/salt/master.d + cat <<EOF >/etc/salt/master.d/auto-accept.conf +open_mode: True +auto_accept: True +EOF + + cat <<EOF >/etc/salt/master.d/reactor.conf +# React to new minions starting by running highstate on them. +reactor: + - 'salt/minion/*/start': + - /srv/reactor/start.sls +EOF + + # Install Salt + # + # We specify -X to avoid a race condition that can cause minion failure to + # install. See https://github.com/saltstack/salt-bootstrap/issues/270 + # + # -M installs the master + curl -L http://bootstrap.saltstack.com | sh -s -- -M + + mkdir -p /srv/salt/nginx + echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd + + # a file we touch to state that base-setup is done + echo "Salt configured" > /var/kube-vagrant-setup +fi + +# Build release +echo "Building release" +pushd /vagrant + ./release/build-release.sh kubernetes +popd + +echo "Running release install script" +pushd /vagrant/output/release/master-release/src/scripts + ./master-release-install.sh +popd + +echo "Executing configuration" +salt '*' mine.update +salt --force-color '*' state.highstate \ No newline at end of file diff --git a/cluster/vagrant/provision-minion.sh b/cluster/vagrant/provision-minion.sh new file mode 100755 index 00000000000..5796ebb8bb7 --- /dev/null +++ b/cluster/vagrant/provision-minion.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# exit on any error +set -e +source $(dirname $0)/provision-config.sh + +MINION_IP=$4 +# we will run provision to update code each time we test, so we do not want to do salt install each time +if [ ! -f "/var/kube-vagrant-setup" ]; then + + if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then + echo "Adding host entry for $MASTER_NAME" + echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts + fi + + # Prepopulate the name of the Master + mkdir -p /etc/salt/minion.d + echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf + + # Our minions will have a pool role to distinguish them from the master. + cat <<EOF >/etc/salt/minion.d/grains.conf +grains: + minion_ip: $MINION_IP + etcd_servers: $MASTER_IP + roles: + - kubernetes-pool + cbr-cidr: $MINION_IP_RANGE +EOF + + # Install Salt + # + # We specify -X to avoid a race condition that can cause minion failure to + # install. See https://github.com/saltstack/salt-bootstrap/issues/270 + curl -L http://bootstrap.saltstack.com | sh -s -- -X + + ## TODO this only works on systemd distros, need to find a work-around as removing -X above fails to start the services installed + systemctl enable salt-minion + systemctl start salt-minion + + # a file we touch to state that base-setup is done + echo "Salt configured" > /var/kube-vagrant-setup + +fi diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh new file mode 100644 index 00000000000..2cd4010c320 --- /dev/null +++ b/cluster/vagrant/util.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts. + +source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"} + +function detect-master () { + echo "KUBE_MASTER_IP: $KUBE_MASTER_IP" + echo "KUBE_MASTER: $KUBE_MASTER" +} + +# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[] +function detect-minions { + echo "Minions already detected" +} + +# Verify prereqs on host machine +function verify-prereqs { + for x in vagrant virtualbox; do + if [ "$(which $x)" == "" ]; then + echo "Can't find $x in PATH, please fix and retry." 
+ exit 1 + fi + done +} + +# Instantiate a kubernetes cluster +function kube-up { + vagrant up +} + +# Delete a kubernetes cluster +function kube-down { + vagrant destroy -f +} + +# Update a kubernetes cluster with latest source +function kube-push { + vagrant provision +} + +# Execute prior to running tests to build a release if required for env +function test-build-release { + echo "Vagrant provider can skip release build" +} + +# Execute prior to running tests to initialize required structure +function test-setup { + echo "Vagrant test setup complete" +} + +# Execute after running tests to perform any required clean-up +function test-teardown { + echo "Vagrant ignores tear-down" +} diff --git a/hack/e2e-suite/basic.sh b/hack/e2e-suite/basic.sh index 9ecd6d6c81f..985007f4a61 100755 --- a/hack/e2e-suite/basic.sh +++ b/hack/e2e-suite/basic.sh @@ -20,8 +20,8 @@ # Exit on error set -e -source "${KUBE_REPO_ROOT}/cluster/util.sh" -detect-project +source "${KUBE_REPO_ROOT}/cluster/kube-env.sh" +source "${KUBE_REPO_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh" # Launch a container $CLOUDCFG -p 8080:80 run dockerfile/nginx 2 myNginx diff --git a/hack/e2e-suite/guestbook.sh b/hack/e2e-suite/guestbook.sh index 96023578793..59bfcde6a84 100755 --- a/hack/e2e-suite/guestbook.sh +++ b/hack/e2e-suite/guestbook.sh @@ -20,7 +20,9 @@ set -e -source "${KUBE_REPO_ROOT}/cluster/util.sh" +source "${KUBE_REPO_ROOT}/cluster/kube-env.sh" +source "${KUBE_REPO_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh" + GUESTBOOK="${KUBE_REPO_ROOT}/examples/guestbook" # Launch the guestbook example diff --git a/hack/e2e-test.sh b/hack/e2e-test.sh index d4d5314d18f..3d8aa18baf0 100755 --- a/hack/e2e-test.sh +++ b/hack/e2e-test.sh @@ -17,6 +17,9 @@ # Starts a Kubernetes cluster, runs the e2e test suite, and shuts it # down. 
+source $(dirname $0)/../cluster/kube-env.sh +source $(dirname $0)/../cluster/$KUBERNETES_PROVIDER/util.sh + # For debugging of this test's components, it's helpful to leave the test # cluster running. ALREADY_UP=${1:-0} @@ -25,6 +28,7 @@ LEAVE_UP=${2:-0} HAVE_JQ=$(which jq) if [[ -z ${HAVE_JQ} ]]; then echo "Please install jq, e.g.: 'sudo apt-get install jq' or, " + echo "'sudo yum install jq' or, " echo "if you're on a mac with homebrew, 'brew install jq'." exit 1 fi @@ -37,11 +41,8 @@ export KUBE_CONFIG_FILE="config-test.sh" export KUBE_REPO_ROOT="$(dirname $0)/.." export CLOUDCFG="${KUBE_REPO_ROOT}/cluster/kubecfg.sh" -source "${KUBE_REPO_ROOT}/cluster/util.sh" -${KUBE_REPO_ROOT}/hack/build-go.sh - -# Build a release -$(dirname $0)/../release/release.sh +# Build a release required by the test provider [if any] +test-build-release if [[ ${ALREADY_UP} -ne 1 ]]; then # Now bring a test cluster up with that release. @@ -51,35 +52,13 @@ else $(dirname $0)/../cluster/kube-push.sh fi -# Detect the project into $PROJECT if it isn't set -detect-project +# Perform any required setup of the cluster +test-setup set +e -if [[ ${ALREADY_UP} -ne 1 ]]; then - # Open up port 80 & 8080 so common containers on minions can be reached - gcutil addfirewall \ - --norespect_terminal_width \ - --project ${PROJECT} \ - --target_tags ${MINION_TAG} \ - --allowed tcp:80,tcp:8080 \ - --network ${NETWORK} \ - ${MINION_TAG}-http-alt -fi - -# Auto shutdown cluster when we exit -function shutdown-test-cluster () { - echo "Shutting down test cluster in background." 
- gcutil deletefirewall \ - --project ${PROJECT} \ - --norespect_terminal_width \ - --force \ - ${MINION_TAG}-http-alt & - $(dirname $0)/../cluster/kube-down.sh > /dev/null & -} - if [[ ${LEAVE_UP} -ne 1 ]]; then - trap shutdown-test-cluster EXIT + trap test-teardown EXIT fi any_failed=0 diff --git a/release/release.sh b/release/release.sh index 241444ff2ad..8b7ad6c8155 100755 --- a/release/release.sh +++ b/release/release.sh @@ -27,7 +27,8 @@ SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd) source $SCRIPT_DIR/config.sh -source $(dirname ${BASH_SOURCE})/../cluster/${KUBE_CONFIG_FILE-"config-default.sh"} +source "${KUBE_REPO_ROOT}/cluster/kube-env.sh" +source $(dirname ${BASH_SOURCE})/../cluster/${KUBERNETES_PROVIDER}/${KUBE_CONFIG_FILE-"config-default.sh"} cd $SCRIPT_DIR/..