Use binary releases for cluster push scripts.

This is for GCE right now.  Other clouds/clusters are probably broken.
This commit is contained in:
Joe Beda
2014-09-23 15:54:27 -07:00
parent f5dffe3bfe
commit 15cd6f07d6
19 changed files with 397 additions and 437 deletions

View File

@@ -20,29 +20,51 @@
# config-default.sh.
source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}
# Find the release to use. If passed in, go with that and validate. If not use
# the release/config.sh version assuming a dev workflow.
function find-release() {
if [ -n "$1" ]; then
RELEASE_NORMALIZED=$1
else
local RELEASE_CONFIG_SCRIPT=$(dirname $0)/../release/config.sh
if [ -f $(dirname $0)/../release/config.sh ]; then
. $RELEASE_CONFIG_SCRIPT
normalize_release
fi
fi
# Verify prereqs
#
# Verifies that the Google Cloud SDK tools needed for cluster management are
# available on PATH, and locates the repo root relative to this script.
#
# Vars set:
#   KUBE_REPO_ROOT
function verify-prereqs {
  # Root of the kube repo, two levels up from this script's directory.
  KUBE_REPO_ROOT="$(dirname ${BASH_SOURCE})/../.."

  # All three tools are required; fail fast with a clear message otherwise.
  # NOTE(review): the lines that previously probed $RELEASE_NORMALIZED here
  # were residue from the removed find-release function and have been dropped.
  local x
  for x in gcloud gcutil gsutil; do
    if ! command -v "$x" > /dev/null; then
      echo "Can't find $x in PATH, please fix and retry." 1>&2
      exit 1
    fi
  done
}
# Verify and find the various tar files that we are going to use on the server.
#
# Prefers the tars shipped alongside a binary release (server/); falls back to
# a locally built dev release under _output/release-tars/.
#
# Assumed vars:
#   KUBE_REPO_ROOT
# Vars set:
#   SERVER_BINARY_TAR
#   SALT_TAR
function find-release-tars {
  SERVER_BINARY_TAR="${KUBE_REPO_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    SERVER_BINARY_TAR="${KUBE_REPO_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
  fi
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz" 1>&2
    exit 1
  fi

  SALT_TAR="${KUBE_REPO_ROOT}/server/kubernetes-salt.tar.gz"
  if [[ ! -f "$SALT_TAR" ]]; then
    SALT_TAR="${KUBE_REPO_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
  fi
  if [[ ! -f "$SALT_TAR" ]]; then
    echo "!!! Cannot find kubernetes-salt.tar.gz" 1>&2
    exit 1
  fi
}
# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Vars set:
# PROJECT
function detect-project () {
if [ -z "$PROJECT" ]; then
PROJECT=$(gcloud config list project | tail -n 1 | cut -f 3 -d ' ')
@@ -55,6 +77,52 @@ function detect-project () {
echo "Project: $PROJECT (autodetected from gcloud config)"
}
# Take the local tar files and upload them to Google Storage. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
#   PROJECT
#   SERVER_BINARY_TAR
#   SALT_TAR
# Vars set:
#   SERVER_BINARY_TAR_URL
#   SALT_TAR_URL
function upload-server-tars() {
  SERVER_BINARY_TAR_URL=
  SALT_TAR_URL=

  # Hash the project name to get a stable, per-project bucket suffix.
  # BSD/macOS ships 'md5'; GNU coreutils ships 'md5sum'.
  local project_hash
  if which md5 > /dev/null 2>&1; then
    project_hash=$(md5 -q -s "$PROJECT")
  else
    # md5sum prints "<hash>  -" when reading stdin; keep only the hash field,
    # otherwise the bucket name would contain a space and a dash.
    project_hash=$(echo -n "$PROJECT" | md5sum | awk '{ print $1 }')
  fi
  local -r staging_bucket="gs://kubernetes-staging-${project_hash}"

  # Ensure the bucket is created
  if ! gsutil ls "$staging_bucket" > /dev/null 2>&1 ; then
    echo "Creating $staging_bucket"
    gsutil mb "${staging_bucket}"
  fi

  local -r staging_path="${staging_bucket}/devel"

  echo "+++ Staging server tars to Google Storage: ${staging_path}"
  SERVER_BINARY_TAR_URL="${staging_path}/${SERVER_BINARY_TAR##*/}"
  gsutil -q cp "${SERVER_BINARY_TAR}" "${SERVER_BINARY_TAR_URL}"
  SALT_TAR_URL="${staging_path}/${SALT_TAR##*/}"
  gsutil -q cp "${SALT_TAR}" "${SALT_TAR_URL}"
}
# Detect the information about the minions
#
# Assumed vars:
# MINION_NAMES
# ZONE
# Vars set:
# KUBE_MINION_IP_ADDRESS (array)
function detect-minions () {
KUBE_MINION_IP_ADDRESSES=()
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
@@ -75,6 +143,14 @@ function detect-minions () {
fi
}
# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# ZONE
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master () {
KUBE_MASTER=${MASTER_NAME}
if [ -z "$KUBE_MASTER_IP" ]; then
@@ -90,58 +166,53 @@ function detect-master () {
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
# Ensure that we have a password created for validating to the master. Will
# read from $HOME/.kubernetes_auth if available.
#
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
function get-password {
  local file="$HOME/.kubernetes_auth"
  if [[ -r "$file" ]]; then
    # NOTE(review): these inline snippets use Python 2 print-statement syntax.
    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
    return
  fi
  KUBE_USER=admin
  # 16 random alphanumeric characters from a cryptographic source.
  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')

  # Store password for reuse.
  cat << EOF > "$file"
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD"
}
EOF
  chmod 0600 "$file"
}
# Verify prereqs
# NOTE(review): this looks like stale diff residue — verify-prereqs is already
# defined earlier in this file, and since bash keeps the last definition seen,
# this older copy silently shadows the newer one. It also references "$file",
# which is only set inside get-password, so the trailing chmod operates on an
# empty/unset path. Presumably this whole block should be deleted; confirm
# against the original upstream commit before removing.
function verify-prereqs {
# Require each Google Cloud SDK tool to be present on PATH.
for x in gcloud gcutil gsutil; do
if [ "$(which $x)" == "" ]; then
echo "Can't find $x in PATH, please fix and retry." 1>&2
exit 1
fi
done
# "$file" is not set in this function (see NOTE above).
chmod 0600 "$file"
}
# Instantiate a kubernetes cluster
#
# Assumed vars
# KUBE_REPO_ROOT
# <Various vars set in config file>
function kube-up {
# Find the release to use. Generally it will be passed when doing a 'prod'
# install and will default to the release/config.sh version when doing a
# developer up.
find-release $1
# Detect the project into $PROJECT if it isn't set
detect-project
# This will take us up to the git repo root
local base_dir=$(dirname "${BASH_SOURCE}")/../..
# Make sure we have the tar files staged on Google Storage
find-release-tars
upload-server-tars
# Build up start up script for master
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap 'rm -rf "${KUBE_TEMP}"' EXIT
local kube_temp=$(mktemp -d -t kubernetes.XXXXXX)
trap 'rm -rf "${kube_temp}"' EXIT
get-password
python "${base_dir}/third_party/htpasswd/htpasswd.py" -b \
-c "${KUBE_TEMP}/htpasswd" $user $passwd
HTPASSWD=$(cat "${KUBE_TEMP}/htpasswd")
python "${KUBE_REPO_ROOT}/third_party/htpasswd/htpasswd.py" \
-b -c "${kube_temp}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
local htpasswd=$(cat "${kube_temp}/htpasswd")
if ! gcutil getnetwork "${NETWORK}"; then
echo "Creating new network for: ${NETWORK}"
@@ -175,12 +246,16 @@ function kube-up {
(
echo "#! /bin/bash"
echo "MASTER_NAME='${MASTER_NAME}'"
echo "MASTER_RELEASE_TAR=${RELEASE_NORMALIZED}/master-release.tgz"
echo "MASTER_HTPASSWD='${HTPASSWD}'"
grep -v "^#" "${base_dir}/cluster/templates/download-release.sh"
grep -v "^#" "${base_dir}/cluster/templates/salt-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
echo "mkdir -p /var/cache/kubernetes-install"
echo "cd /var/cache/kubernetes-install"
echo "readonly MASTER_NAME='${MASTER_NAME}'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
grep -v "^#" "${KUBE_REPO_ROOT}/cluster/gce/templates/download-release.sh"
grep -v "^#" "${KUBE_REPO_ROOT}/cluster/gce/templates/salt-master.sh"
) > "${kube_temp}/master-start.sh"
gcutil addinstance ${MASTER_NAME}\
--project ${PROJECT} \
@@ -193,15 +268,15 @@ function kube-up {
--network ${NETWORK} \
--service_account_scopes="storage-ro,compute-rw" \
--automatic_restart \
--metadata_from_file "startup-script:${KUBE_TEMP}/master-start.sh" &
--metadata_from_file "startup-script:${kube_temp}/master-start.sh" &
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
(
echo "#! /bin/bash"
echo "MASTER_NAME='${MASTER_NAME}'"
echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}"
grep -v "^#" "${base_dir}/cluster/templates/salt-minion.sh"
) > ${KUBE_TEMP}/minion-start-${i}.sh
grep -v "^#" "${KUBE_REPO_ROOT}/cluster/gce/templates/salt-minion.sh"
) > "${kube_temp}/minion-start-${i}.sh"
gcutil addfirewall ${MINION_NAMES[$i]}-all \
--project ${PROJECT} \
@@ -223,7 +298,7 @@ function kube-up {
--service_account_scopes=${MINION_SCOPES} \
--automatic_restart \
--can_ip_forward \
--metadata_from_file "startup-script:${KUBE_TEMP}/minion-start-${i}.sh" &
--metadata_from_file "startup-script:${kube_temp}/minion-start-${i}.sh" &
gcutil addroute ${MINION_NAMES[$i]} ${MINION_IP_RANGES[$i]} \
--project ${PROJECT} \
@@ -233,17 +308,17 @@ function kube-up {
--next_hop_instance ${ZONE}/instances/${MINION_NAMES[$i]} &
done
FAIL=0
local fail=0
local job
for job in `jobs -p`
do
wait $job || let "FAIL+=1"
wait $job || let "fail+=1"
done
if (( $FAIL != 0 )); then
echo "${FAIL} commands failed. Exiting."
if (( $fail != 0 )); then
echo "${fail} commands failed. Exiting."
exit 2
fi
detect-master > /dev/null
echo "Waiting for cluster initialization."
@@ -253,7 +328,7 @@ function kube-up {
echo " up."
echo
until $(curl --insecure --user ${user}:${passwd} --max-time 5 \
until $(curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \
--fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do
printf "."
sleep 2
@@ -264,16 +339,15 @@ function kube-up {
sleep 5
# Don't bail on errors, we want to be able to print some info.
set +e
# Basic sanity checking
local i
local rc # Capture return code without exiting because of errexit bash option
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# Make sure docker is installed
gcutil ssh ${MINION_NAMES[$i]} which docker > /dev/null
if [ "$?" != "0" ]; then
echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely to work correctly." 1>&2
echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)" 1>&2
gcutil ssh ${MINION_NAMES[$i]} which docker >/dev/null && rc=$? || rc=$?
if [[ "$rc" != "0" ]]; then
echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely to work correctly."
echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)"
exit 1
fi
done
@@ -286,29 +360,28 @@ function kube-up {
echo "The user name and password to use is located in ~/.kubernetes_auth."
echo
kube_cert=".kubecfg.crt"
kube_key=".kubecfg.key"
ca_cert=".kubernetes.ca.crt"
local kube_cert=".kubecfg.crt"
local kube_key=".kubecfg.key"
local ca_cert=".kubernetes.ca.crt"
(umask 077
gcutil ssh "${MASTER_NAME}" sudo cat /usr/share/nginx/kubecfg.crt > "${HOME}/${kube_cert}"
gcutil ssh "${MASTER_NAME}" sudo cat /usr/share/nginx/kubecfg.key > "${HOME}/${kube_key}"
gcutil ssh "${MASTER_NAME}" sudo cat /usr/share/nginx/ca.crt > "${HOME}/${ca_cert}"
gcutil ssh "${MASTER_NAME}" sudo cat /usr/share/nginx/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
gcutil ssh "${MASTER_NAME}" sudo cat /usr/share/nginx/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
gcutil ssh "${MASTER_NAME}" sudo cat /usr/share/nginx/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
cat << EOF > ~/.kubernetes_auth
{
"User": "$user",
"Password": "$passwd",
"User": "$KUBE_USER",
"Password": "$KUBE_PASSWORD",
"CAFile": "$HOME/$ca_cert",
"CertFile": "$HOME/$kube_cert",
"KeyFile": "$HOME/$kube_key"
}
EOF
chmod 0600 ~/.kubernetes_auth
chmod 0600 "${HOME}/${kube_cert}"
chmod 0600 "${HOME}/${kube_key}"
chmod 0600 "${HOME}/${ca_cert}")
chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
"${HOME}/${kube_key}" "${HOME}/${ca_cert}"
)
}
# Delete a kubernetes cluster
@@ -362,22 +435,24 @@ function kube-down {
# Update a kubernetes cluster with latest source
function kube-push {
# Find the release to use. Generally it will be passed when doing a 'prod'
# install and will default to the release/config.sh version when doing a
# developer up.
find-release $1
# Detect the project into $PROJECT
detect-project
detect-master
# Make sure we have the tar files staged on Google Storage
find-release-tars
upload-server-tars
(
echo MASTER_RELEASE_TAR=$RELEASE_NORMALIZED/master-release.tgz
grep -v "^#" $(dirname $0)/templates/download-release.sh
echo "#! /bin/bash"
echo "mkdir -p /var/cache/kubernetes-install"
echo "cd /var/cache/kubernetes-install"
echo "readonly SERVER_BINARY_TAR_URL=${SERVER_BINARY_TAR_URL}"
echo "readonly SALT_TAR_URL=${SALT_TAR_URL}"
grep -v "^#" "${KUBE_REPO_ROOT}/cluster/gce/templates/download-release.sh"
echo "echo Executing configuration"
echo "sudo salt '*' mine.update"
echo "sudo salt --force-color '*' state.highstate"
) | gcutil ssh --project ${PROJECT} --zone ${ZONE} $KUBE_MASTER bash
) | gcutil ssh --project $PROJECT --zone $ZONE $KUBE_MASTER sudo bash
get-password
@@ -391,15 +466,27 @@ function kube-push {
}
# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh

# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
#   KUBE_REPO_ROOT
function test-build-release {
  # Build source
  "${KUBE_REPO_ROOT}/hack/build-go.sh"
  # Make a release
  "${KUBE_REPO_ROOT}/release/release.sh"
}
# Execute prior to running tests to initialize required structure
# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e-test.sh.
#
# Assumed vars:
# PROJECT
# ALREADY_UP
# Variables from config.sh
function test-setup {
# Detect the project into $PROJECT if it isn't set
@@ -420,7 +507,11 @@ function test-setup {
}
# Execute after running tests to perform any required clean-up
# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e-test.sh
#
# Assumed Vars:
# PROJECT
function test-teardown {
echo "Shutting down test cluster in background."
gcutil deletefirewall \
@@ -431,5 +522,3 @@ function test-teardown {
${MINION_TAG}-${INSTANCE_PREFIX}-http-alt || true > /dev/null
$(dirname $0)/../cluster/kube-down.sh > /dev/null
}