Refactor cluster/gce/trusty/node.yaml
The node.yaml contains logic that will also be needed for running the Kubernetes master on Trusty (issue #16702). This change moves the code shared by the master and node configuration into a separate script, which both the master and node configuration can source. Moreover, this change stages the script for GKE use.
This commit is contained in:
parent
068e70dba8
commit
5ca070478e
@ -1000,7 +1000,7 @@ function kube::release::gcs::copy_release_artifacts() {
|
|||||||
# deploy hosted with the release is useful for GKE.
|
# deploy hosted with the release is useful for GKE.
|
||||||
kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/configure-vm.sh" extra/gce || return 1
|
kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/configure-vm.sh" extra/gce || return 1
|
||||||
kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/trusty/node.yaml" extra/gce || return 1
|
kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/trusty/node.yaml" extra/gce || return 1
|
||||||
|
kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/trusty/configure.sh" extra/gce || return 1
|
||||||
|
|
||||||
# Upload the "naked" binaries to GCS. This is useful for install scripts that
|
# Upload the "naked" binaries to GCS. This is useful for install scripts that
|
||||||
# download the binaries directly and don't need tars.
|
# download the binaries directly and don't need tars.
|
||||||
|
221
cluster/gce/trusty/configure.sh
Normal file
221
cluster/gce/trusty/configure.sh
Normal file
@ -0,0 +1,221 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
# This script contains functions for configuring instances to run kubernetes
|
||||||
|
# nodes. It is uploaded to GCE metadata server when a VM instance is created,
|
||||||
|
# and then downloaded by the instance. The upstart jobs in
|
||||||
|
# cluster/gce/trusty/node.yaml source this script to make use of needed
|
||||||
|
# functions. The script itself is not supposed to be executed in other manners.
|
||||||
|
|
||||||
|
# Set the machine hostname to its short (unqualified) form, as reported
# by `hostname -s`.
config_hostname() {
  short_hostname=$(hostname -s)
  # Quote the expansion so an unexpected value cannot be word-split or
  # globbed before it reaches hostname.
  hostname "${short_hostname}"
}
|
||||||
|
|
||||||
|
# Relax the host firewall when the GCE image ships a default-DROP policy
# on the INPUT/FORWARD chains, which would drop most inbound and
# forwarded packets. In that case, add rules accepting all TCP/UDP
# traffic. (-w makes iptables wait for the xtables lock.)
config_ip_firewall() {
  if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
    # Fixed typo: "accpet" -> "accept" in the log message.
    echo "Add rules to accept all inbound TCP/UDP packets"
    iptables -A INPUT -w -p TCP -j ACCEPT
    iptables -A INPUT -w -p UDP -j ACCEPT
  fi
  if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
    echo "Add rules to accept all forwarded TCP/UDP packets"
    iptables -A FORWARD -w -p TCP -j ACCEPT
    iptables -A FORWARD -w -p UDP -j ACCEPT
  fi
}
|
||||||
|
|
||||||
|
# Create the directories kubelet, kube-proxy, and the static-pod
# manifests require; `mkdir -p` keeps this idempotent across reboots.
create_dirs() {
  for required_dir in /var/lib/kubelet /var/lib/kube-proxy /etc/kubernetes/manifests; do
    mkdir -p "${required_dir}"
  done
}
|
||||||
|
|
||||||
|
# Fetch the kube-env attribute from the GCE metadata server and convert
# it into a shell-sourceable file of readonly assignments at
# /etc/kube-env. Sibling functions `. /etc/kube-env` to read the values.
download_kube_env() {
  # Fetch kube-env from GCE metadata server.
  curl --fail --silent --show-error \
    -H "X-Google-Metadata-Request: True" \
    -o /tmp/kube-env-yaml \
    http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
  # Convert the yaml format file into a shell-style file.
  # NOTE(review): the python stdout is redirected into /etc/kube-env, so
  # the surrounding eval receives an empty string and is effectively a
  # no-op; presumably only the generated file is intended — confirm.
  # The ''' is shell quote concatenation ('' + '...'), not a Python
  # triple-quote; pipes.quote protects values with shell metacharacters.
  eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
  print "readonly {var}={value}".format(var = k, value = pipes.quote(str(v)))
''' < /tmp/kube-env-yaml > /etc/kube-env)
}
|
||||||
|
|
||||||
|
# Write the kubelet kubeconfig to /var/lib/kubelet/kubeconfig, using the
# certificate material published in /etc/kube-env. When no
# kubelet-specific CA certificate is provided, fall back to the cluster
# CA certificate.
create_kubelet_kubeconfig() {
  . /etc/kube-env
  # Default to CA_CERT when KUBELET_CA_CERT is unset or empty.
  KUBELET_CA_CERT="${KUBELET_CA_CERT:-${CA_CERT}}"
  cat > /var/lib/kubelet/kubeconfig << EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate-data: ${KUBELET_CERT}
    client-key-data: ${KUBELET_KEY}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${KUBELET_CA_CERT}
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
EOF
}
|
||||||
|
|
||||||
|
# Write the kube-proxy kubeconfig to /var/lib/kube-proxy/kubeconfig.
# NOTE(review): unlike create_kubelet_kubeconfig, this does not source
# /etc/kube-env itself — it relies on KUBE_PROXY_TOKEN and CA_CERT
# already being in the caller's environment (node.yaml calls it right
# after create_kubelet_kubeconfig in the same shell). Re-sourcing here
# would re-assign the readonly kube-env variables and error out, so the
# implicit precondition stands — confirm callers always satisfy it.
create_kubeproxy_kubeconfig() {
  # Create the kube-proxy config file.
  cat > /var/lib/kube-proxy/kubeconfig << EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT}
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
EOF
}
|
||||||
|
|
||||||
|
# Installs the critical packages that are required by spinning up a cluster.
|
||||||
|
install_critical_packages() {
|
||||||
|
apt-get update
|
||||||
|
# Install docker and brctl if they are not in the image.
|
||||||
|
if ! which docker > /dev/null; then
|
||||||
|
echo "Do not find docker. Install it."
|
||||||
|
# We should install the latest qualified docker, which is version 1.8.3 at present.
|
||||||
|
curl -sSL https://get.docker.com/ | DOCKER_VERSION=1.8.3 sh
|
||||||
|
fi
|
||||||
|
if ! which brctl > /dev/null; then
|
||||||
|
echo "Do not find brctl. Install it."
|
||||||
|
apt-get install --yes bridge-utils
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Install useful-but-optional packages (socat, nsenter). These are not
# required for spinning up a cluster, so this runs separately and can
# overlap with cluster creation.
install_additional_packages() {
  which socat > /dev/null || {
    echo "Do not find socat. Install it."
    apt-get install --yes socat
  }
  which nsenter > /dev/null || {
    echo "Do not find nsenter. Install it."
    # Note: this is an easy way to install nsenter, but may not be the fastest
    # way. In addition, this may not be a trusted source. So, replace it if
    # we have a better solution.
    docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter
  }
}
|
||||||
|
|
||||||
|
# Downloads the kubernetes server binary tarball and the salt tarball,
# verifies their sha1 checksums, and installs them:
#   - kubelet/kube-proxy/kubectl into /usr/bin (or /usr/local/bin for a
#     test cluster, so freshly pulled binaries shadow preinstalled ones),
#   - saltbase configuration under /etc/saltbase (for the add-on yamls).
# Reads SERVER_BINARY_TAR_URL/HASH, SALT_TAR_URL/HASH and TEST_CLUSTER
# from /etc/kube-env.
install_kube_binary_config() {
  . /etc/kube-env
  # For a testing cluster, we pull kubelet, kube-proxy, and kubectl binaries,
  # and place them in /usr/local/bin. For a non-test cluster, we use the binaries
  # pre-installed in the image, or pull and place them in /usr/bin if they are
  # not pre-installed.
  BINARY_PATH="/usr/bin/"
  if [ "${TEST_CLUSTER:-}" = "true" ]; then
    BINARY_PATH="/usr/local/bin/"
  fi
  if ! which kubelet > /dev/null || ! which kube-proxy > /dev/null || [ "${TEST_CLUSTER:-}" = "true" ]; then
    cd /tmp
    k8s_sha1="${SERVER_BINARY_TAR_URL##*/}.sha1"
    echo "Downloading k8s tar sha1 file ${k8s_sha1}"
    curl -Lo "${k8s_sha1}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SERVER_BINARY_TAR_URL}.sha1"
    k8s_tar="${SERVER_BINARY_TAR_URL##*/}"
    echo "Downloading k8s tar file ${k8s_tar}"
    curl -Lo "${k8s_tar}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SERVER_BINARY_TAR_URL}"
    # Validate hash. Bug fix: previously a mismatch only printed a
    # message and the corrupted tarball was unpacked and installed
    # anyway; now we abort so the instance does not run bad binaries.
    actual=$(sha1sum "${k8s_tar}" | awk '{ print $1 }') || true
    if [ "${actual}" != "${SERVER_BINARY_TAR_HASH}" ]; then
      echo "== ${k8s_tar} corrupted, sha1 ${actual} doesn't match expected ${SERVER_BINARY_TAR_HASH} =="
      exit 1
    else
      echo "Validated ${SERVER_BINARY_TAR_URL} SHA1 = ${SERVER_BINARY_TAR_HASH}"
    fi
    tar xzf "/tmp/${k8s_tar}" -C /tmp/ --overwrite
    cp /tmp/kubernetes/server/bin/kubelet "${BINARY_PATH}"
    cp /tmp/kubernetes/server/bin/kube-proxy "${BINARY_PATH}"
    cp /tmp/kubernetes/server/bin/kubectl "${BINARY_PATH}"
    rm -rf "/tmp/kubernetes"
    rm "/tmp/${k8s_tar}"
    rm "/tmp/${k8s_sha1}"
  fi

  # Put saltbase configuration files in /etc/saltbase. We will use the add-on yaml files.
  mkdir -p /etc/saltbase
  cd /etc/saltbase
  salt_sha1="${SALT_TAR_URL##*/}.sha1"
  echo "Downloading Salt tar sha1 file ${salt_sha1}"
  curl -Lo "${salt_sha1}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SALT_TAR_URL}.sha1"
  salt_tar="${SALT_TAR_URL##*/}"
  echo "Downloading Salt tar file ${salt_tar}"
  curl -Lo "${salt_tar}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SALT_TAR_URL}"
  # Validate hash; abort rather than unpack a corrupted tarball (see above).
  actual=$(sha1sum "${salt_tar}" | awk '{ print $1 }') || true
  if [ "${actual}" != "${SALT_TAR_HASH}" ]; then
    echo "== ${salt_tar} corrupted, sha1 ${actual} doesn't match expected ${SALT_TAR_HASH} =="
    exit 1
  else
    echo "Validated ${SALT_TAR_URL} SHA1 = ${SALT_TAR_HASH}"
  fi
  tar xzf "/etc/saltbase/${salt_tar}" -C /etc/saltbase/ --overwrite
  rm "/etc/saltbase/${salt_sha1}"
  rm "/etc/saltbase/${salt_tar}"
}
|
||||||
|
|
||||||
|
# Restart the docker daemon with the options kubernetes needs (use the
# cbr0 bridge; disable docker's own iptables and IP-masquerade handling),
# then tear down the default docker0 bridge. Reads TEST_CLUSTER and
# EXTRA_DOCKER_OPTS from /etc/kube-env.
restart_docker_daemon() {
  . /etc/kube-env
  # Assemble docker daemon options (fixed comment typo: "deamon").
  DOCKER_OPTS="-p /var/run/docker.pid --bridge=cbr0 --iptables=false --ip-masq=false"
  if [ "${TEST_CLUSTER:-}" = "true" ]; then
    DOCKER_OPTS="${DOCKER_OPTS} --log-level=debug"
  fi
  # /etc/default/docker is read by the docker upstart job on restart.
  echo "DOCKER_OPTS=\"${DOCKER_OPTS} ${EXTRA_DOCKER_OPTS}\"" > /etc/default/docker
  # Make sure the network interface cbr0 is created before restarting docker daemon
  # NOTE(review): this loops forever if cbr0 never appears — presumably
  # another component creates the bridge; consider adding a timeout.
  while ! [ -L /sys/class/net/cbr0 ]; do
    echo "Sleep 1 second to wait for cbr0"
    sleep 1
  done
  initctl restart docker
  # Remove docker0 so only cbr0 remains as the container bridge.
  ifconfig docker0 down
  brctl delbr docker0
}
|
@ -32,5 +32,6 @@ function create-node-instance-template {
|
|||||||
local template_name="$1"
|
local template_name="$1"
|
||||||
create-node-template "$template_name" "${scope_flags[*]}" \
|
create-node-template "$template_name" "${scope_flags[*]}" \
|
||||||
"kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
|
"kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
|
||||||
"user-data=${KUBE_ROOT}/cluster/gce/trusty/node.yaml"
|
"user-data=${KUBE_ROOT}/cluster/gce/trusty/node.yaml" \
|
||||||
|
"configure-sh=${KUBE_ROOT}/cluster/gce/trusty/configure.sh"
|
||||||
}
|
}
|
||||||
|
@ -18,84 +18,25 @@ script
|
|||||||
set -o errexit
|
set -o errexit
|
||||||
set -o nounset
|
set -o nounset
|
||||||
|
|
||||||
# Set the hostname to the short version.
|
# Fetch the script for configuring the instance.
|
||||||
short_hostname=$(hostname -s)
|
|
||||||
hostname $short_hostname
|
|
||||||
|
|
||||||
# We have seen that GCE image may have strict host firewall rules which drop
|
|
||||||
# most inbound/forwarded packets. In such a case, add rules to accept all
|
|
||||||
# TCP/UDP packets.
|
|
||||||
if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
|
|
||||||
echo "Add rules to accpet all inbound TCP/UDP packets"
|
|
||||||
iptables -A INPUT -w -p TCP -j ACCEPT
|
|
||||||
iptables -A INPUT -w -p UDP -j ACCEPT
|
|
||||||
fi
|
|
||||||
if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
|
|
||||||
echo "Add rules to accpet all forwarded TCP/UDP packets"
|
|
||||||
iptables -A FORWARD -w -p TCP -j ACCEPT
|
|
||||||
iptables -A FORWARD -w -p UDP -j ACCEPT
|
|
||||||
fi
|
|
||||||
# Create required directories.
|
|
||||||
mkdir -p /var/lib/kubelet
|
|
||||||
mkdir -p /var/lib/kube-proxy
|
|
||||||
mkdir -p /etc/kubernetes/manifests
|
|
||||||
|
|
||||||
# Fetch kube-env from GCE metadata server.
|
|
||||||
curl --fail --silent --show-error \
|
curl --fail --silent --show-error \
|
||||||
-H "X-Google-Metadata-Request: True" \
|
-H "X-Google-Metadata-Request: True" \
|
||||||
-o /tmp/kube-env-yaml \
|
-o /etc/kube-configure.sh \
|
||||||
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
|
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh
|
||||||
# Convert the yaml format file into a shell-style file.
|
. /etc/kube-configure.sh
|
||||||
eval $(python -c '''
|
|
||||||
import pipes,sys,yaml
|
|
||||||
for k,v in yaml.load(sys.stdin).iteritems():
|
|
||||||
print "readonly {var}={value}".format(var = k, value = pipes.quote(str(v)))
|
|
||||||
''' < /tmp/kube-env-yaml > /etc/kube-env)
|
|
||||||
|
|
||||||
#Create the kubelet kubeconfig file.
|
echo "Configuring hostname"
|
||||||
. /etc/kube-env
|
config_hostname
|
||||||
if [ -z "${KUBELET_CA_CERT:-}" ]; then
|
echo "Configuring IP firewall rules"
|
||||||
KUBELET_CA_CERT="${CA_CERT}"
|
config_ip_firewall
|
||||||
fi
|
echo "Creating required directories"
|
||||||
cat > /var/lib/kubelet/kubeconfig << EOF
|
create_dirs
|
||||||
apiVersion: v1
|
echo "Downloading kube-env file"
|
||||||
kind: Config
|
download_kube_env
|
||||||
users:
|
echo "Creating kubelet kubeconfig file"
|
||||||
- name: kubelet
|
create_kubelet_kubeconfig
|
||||||
user:
|
echo "Creating kube-proxy kubeconfig file"
|
||||||
client-certificate-data: ${KUBELET_CERT}
|
create_kubeproxy_kubeconfig
|
||||||
client-key-data: ${KUBELET_KEY}
|
|
||||||
clusters:
|
|
||||||
- name: local
|
|
||||||
cluster:
|
|
||||||
certificate-authority-data: ${KUBELET_CA_CERT}
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: local
|
|
||||||
user: kubelet
|
|
||||||
name: service-account-context
|
|
||||||
current-context: service-account-context
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Create the kube-proxy config file.
|
|
||||||
cat > /var/lib/kube-proxy/kubeconfig << EOF
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Config
|
|
||||||
users:
|
|
||||||
- name: kube-proxy
|
|
||||||
user:
|
|
||||||
token: ${KUBE_PROXY_TOKEN}
|
|
||||||
clusters:
|
|
||||||
- name: local
|
|
||||||
cluster:
|
|
||||||
certificate-authority-data: ${CA_CERT}
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: local
|
|
||||||
user: kube-proxy
|
|
||||||
name: service-account-context
|
|
||||||
current-context: service-account-context
|
|
||||||
EOF
|
|
||||||
end script
|
end script
|
||||||
|
|
||||||
--====================================
|
--====================================
|
||||||
@ -108,23 +49,14 @@ Content-Disposition: attachment; filename="kube-install-packages.conf"
|
|||||||
|
|
||||||
description "Install packages needed to run kubernetes"
|
description "Install packages needed to run kubernetes"
|
||||||
|
|
||||||
start on cloud-config
|
start on stopped kube-env
|
||||||
|
|
||||||
script
|
script
|
||||||
set -o errexit
|
set -o errexit
|
||||||
set -o nounset
|
set -o nounset
|
||||||
|
|
||||||
apt-get update
|
. /etc/kube-configure.sh
|
||||||
# Install docker and brctl if they are not in the image.
|
install_critical_packages
|
||||||
if ! which docker > /dev/null; then
|
|
||||||
echo "Do not find docker. Install it."
|
|
||||||
# We should install the latest qualified docker, which is version 1.8.3 at present.
|
|
||||||
curl -sSL https://get.docker.com/ | DOCKER_VERSION=1.8.3 sh
|
|
||||||
fi
|
|
||||||
if ! which brctl > /dev/null; then
|
|
||||||
echo "Do not find brctl. Install it."
|
|
||||||
apt-get install --yes bridge-utils
|
|
||||||
fi
|
|
||||||
end script
|
end script
|
||||||
|
|
||||||
--====================================
|
--====================================
|
||||||
@ -143,19 +75,8 @@ script
|
|||||||
set -o errexit
|
set -o errexit
|
||||||
set -o nounset
|
set -o nounset
|
||||||
|
|
||||||
# Socat and nsenter are not required for spinning up a cluster. We move the
|
. /etc/kube-configure.sh
|
||||||
# installation here to be in parallel with the cluster creation.
|
install_additional_packages
|
||||||
if ! which socat > /dev/null; then
|
|
||||||
echo "Do not find socat. Install it."
|
|
||||||
apt-get install --yes socat
|
|
||||||
fi
|
|
||||||
if ! which nsenter > /dev/null; then
|
|
||||||
echo "Do not find nsenter. Install it."
|
|
||||||
# Note: this is an easy way to install nsenter, but may not be the fastest way.
|
|
||||||
# In addition, this may not be a trusted source. So, replace it if we have a
|
|
||||||
# better solution.
|
|
||||||
docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter
|
|
||||||
fi
|
|
||||||
end script
|
end script
|
||||||
|
|
||||||
--====================================
|
--====================================
|
||||||
@ -174,56 +95,8 @@ script
|
|||||||
set -o errexit
|
set -o errexit
|
||||||
set -o nounset
|
set -o nounset
|
||||||
|
|
||||||
. /etc/kube-env
|
. /etc/kube-configure.sh
|
||||||
# For a testing cluster, we pull kubelet and kube-proxy binaries, and place them
|
install_kube_binary_config
|
||||||
# in /usr/local/bin. For a non-test cluster, we use the binaries pre-installed
|
|
||||||
# in the image, or pull and place them in /usr/bin if they are not pre-installed.
|
|
||||||
BINARY_PATH="/usr/bin/"
|
|
||||||
if [ "${TEST_CLUSTER:-}" = "true" ]; then
|
|
||||||
BINARY_PATH="/usr/local/bin/"
|
|
||||||
fi
|
|
||||||
if ! which kubelet > /dev/null || ! which kube-proxy > /dev/null || [ "${TEST_CLUSTER:-}" = "true" ]; then
|
|
||||||
cd /tmp
|
|
||||||
k8s_sha1="${SERVER_BINARY_TAR_URL##*/}.sha1"
|
|
||||||
echo "Downloading k8s tar sha1 file ${k8s_sha1}"
|
|
||||||
curl -Lo "${k8s_sha1}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SERVER_BINARY_TAR_URL}.sha1"
|
|
||||||
k8s_tar="${SERVER_BINARY_TAR_URL##*/}"
|
|
||||||
echo "Downloading k8s tar file ${k8s_tar}"
|
|
||||||
curl -Lo "${k8s_tar}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SERVER_BINARY_TAR_URL}"
|
|
||||||
# Validate hash.
|
|
||||||
actual=$(sha1sum ${k8s_tar} | awk '{ print $1 }') || true
|
|
||||||
if [ "${actual}" != "${SERVER_BINARY_TAR_HASH}" ]; then
|
|
||||||
echo "== ${k8s_tar} corrupted, sha1 ${actual} doesn't match expected ${SERVER_BINARY_TAR_HASH} =="
|
|
||||||
else
|
|
||||||
echo "Validated ${SERVER_BINARY_TAR_URL} SHA1 = ${SERVER_BINARY_TAR_HASH}"
|
|
||||||
fi
|
|
||||||
tar xzf "/tmp/${k8s_tar}" -C /tmp/ --overwrite
|
|
||||||
cp /tmp/kubernetes/server/bin/kubelet ${BINARY_PATH}
|
|
||||||
cp /tmp/kubernetes/server/bin/kube-proxy ${BINARY_PATH}
|
|
||||||
rm -rf "/tmp/kubernetes"
|
|
||||||
rm "/tmp/${k8s_tar}"
|
|
||||||
rm "/tmp/${k8s_sha1}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Put saltbase configuration files in /etc/saltbase. We will use the add-on yaml files.
|
|
||||||
mkdir -p /etc/saltbase
|
|
||||||
cd /etc/saltbase
|
|
||||||
salt_sha1="${SALT_TAR_URL##*/}.sha1"
|
|
||||||
echo "Downloading Salt tar sha1 file ${salt_sha1}"
|
|
||||||
curl -Lo "${salt_sha1}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SALT_TAR_URL}.sha1"
|
|
||||||
salt_tar="${SALT_TAR_URL##*/}"
|
|
||||||
echo "Downloading Salt tar file ${salt_tar}"
|
|
||||||
curl -Lo "${salt_tar}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SALT_TAR_URL}"
|
|
||||||
# Validate hash.
|
|
||||||
actual=$(sha1sum ${salt_tar} | awk '{ print $1 }') || true
|
|
||||||
if [ "${actual}" != "${SALT_TAR_HASH}" ]; then
|
|
||||||
echo "== ${salt_tar} corrupted, sha1 ${actual} doesn't match expected ${SALT_TAR_HASH} =="
|
|
||||||
else
|
|
||||||
echo "Validated ${SALT_TAR_URL} SHA1 = ${SALT_TAR_HASH}"
|
|
||||||
fi
|
|
||||||
tar xzf "/etc/saltbase/${salt_tar}" -C /etc/saltbase/ --overwrite
|
|
||||||
rm "/etc/saltbase/${salt_sha1}"
|
|
||||||
rm "/etc/saltbase/${salt_tar}"
|
|
||||||
end script
|
end script
|
||||||
|
|
||||||
--====================================
|
--====================================
|
||||||
@ -325,22 +198,8 @@ script
|
|||||||
set -o errexit
|
set -o errexit
|
||||||
set -o nounset
|
set -o nounset
|
||||||
|
|
||||||
. /etc/kube-env
|
. /etc/kube-configure.sh
|
||||||
# Assemble docker deamon options
|
restart_docker_daemon
|
||||||
DOCKER_OPTS="-p /var/run/docker.pid --bridge=cbr0 --iptables=false --ip-masq=false"
|
|
||||||
if [ "${TEST_CLUSTER:-}" = "true" ]; then
|
|
||||||
DOCKER_OPTS="${DOCKER_OPTS} --log-level=debug"
|
|
||||||
fi
|
|
||||||
echo "DOCKER_OPTS=\"${DOCKER_OPTS} ${EXTRA_DOCKER_OPTS}\"" > /etc/default/docker
|
|
||||||
# Make sure the network interface cbr0 is created before restarting docker daemon
|
|
||||||
while ! [ -L /sys/class/net/cbr0 ]; do
|
|
||||||
echo "Sleep 1 second to wait for cbr0"
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
initctl restart docker
|
|
||||||
# Remove docker0
|
|
||||||
ifconfig docker0 down
|
|
||||||
brctl delbr docker0
|
|
||||||
end script
|
end script
|
||||||
|
|
||||||
--====================================
|
--====================================
|
||||||
@ -416,4 +275,3 @@ script
|
|||||||
end script
|
end script
|
||||||
|
|
||||||
--====================================--
|
--====================================--
|
||||||
|
|
||||||
|
@ -351,6 +351,7 @@ function get-template-name-from-version {
|
|||||||
# $2: The scopes flag.
|
# $2: The scopes flag.
|
||||||
# $3: The minion start script metadata from file.
|
# $3: The minion start script metadata from file.
|
||||||
# $4: The kube-env metadata.
|
# $4: The kube-env metadata.
|
||||||
|
# $5(optional): Additional metadata for Ubuntu trusty nodes.
|
||||||
function create-node-template {
|
function create-node-template {
|
||||||
detect-project
|
detect-project
|
||||||
local template_name="$1"
|
local template_name="$1"
|
||||||
@ -386,7 +387,7 @@ function create-node-template {
|
|||||||
${preemptible_minions} \
|
${preemptible_minions} \
|
||||||
$2 \
|
$2 \
|
||||||
--can-ip-forward \
|
--can-ip-forward \
|
||||||
--metadata-from-file "$3","$4" >&2; then
|
--metadata-from-file $(echo ${@:3} | tr ' ' ',') >&2; then
|
||||||
if (( attempt > 5 )); then
|
if (( attempt > 5 )); then
|
||||||
echo -e "${color_red}Failed to create instance template $template_name ${color_norm}" >&2
|
echo -e "${color_red}Failed to create instance template $template_name ${color_norm}" >&2
|
||||||
exit 2
|
exit 2
|
||||||
|
Loading…
Reference in New Issue
Block a user