commit dc81fe1f12

cluster/rackspace/authorization.sh (new file, 36 lines)

@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Copyright 2015 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Create a generic token following the GCE standard
+create_token() {
+  echo $(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
+}
+
+get_tokens_from_csv() {
+  KUBE_BEARER_TOKEN=$(awk -F, '/admin/ {print $1}' ${KUBE_TEMP}/${1}_tokens.csv)
+  KUBELET_TOKEN=$(awk -F, '/kubelet/ {print $1}' ${KUBE_TEMP}/${1}_tokens.csv)
+  KUBE_PROXY_TOKEN=$(awk -F, '/kube_proxy/ {print $1}' ${KUBE_TEMP}/${1}_tokens.csv)
+}
+
+generate_admin_token() {
+  echo "$(create_token),admin,admin" >> ${KUBE_TEMP}/known_tokens.csv
+}
+
+# Creates a csv file each time called (i.e. one per kubelet).
+generate_kubelet_tokens() {
+  echo "$(create_token),kubelet,kubelet" > ${KUBE_TEMP}/${1}_tokens.csv
+  echo "$(create_token),kube_proxy,kube_proxy" >> ${KUBE_TEMP}/${1}_tokens.csv
+}
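Note: the rendered page showed the definition as `get_token_from_csv` while util.sh below calls `get_tokens_from_csv`, and showed `generate_admin_token` writing to `${1}_tokens.csv` although it is invoked with no argument; both are reconciled above. A minimal sketch (not part of the commit) of what these helpers produce, assuming it is run from the repo root with the token values shown being placeholders:

    KUBE_TEMP=$(mktemp -d)
    source cluster/rackspace/authorization.sh

    generate_kubelet_tokens minion-1
    cat ${KUBE_TEMP}/minion-1_tokens.csv
    # 3xKfWqEkz...(32 chars),kubelet,kubelet
    # 9qZnP4RdT...(32 chars),kube_proxy,kube_proxy

    get_tokens_from_csv minion-1
    echo "kubelet=${KUBELET_TOKEN} proxy=${KUBE_PROXY_TOKEN}"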
cluster/rackspace/cloud-config/master-cloud-config.yaml

@@ -23,12 +23,19 @@ write_files:
 permissions: 0755
 content: |
 #!/bin/bash
+# This temp URL is only good for the length of time specified at cluster creation time.
+# Afterward, it will result in a 403.
 OBJECT_URL="CLOUD_FILES_URL"
-echo "Downloading release ($OBJECT_URL)"
-wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz
-echo "Unpacking release"
-rm -rf /opt/kubernetes || false
-tar xzf /opt/kubernetes.tar.gz -C /opt/
+if [ ! -s /opt/kubernetes.tar.gz ]
+then
+  echo "Downloading release ($OBJECT_URL)"
+  wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz
+  echo "Unpacking release"
+  rm -rf /opt/kubernetes || false
+  tar xzf /opt/kubernetes.tar.gz -C /opt/
+else
+  echo "kubernetes release found. Skipping download."
+fi
 - path: /opt/.kubernetes_auth
 permissions: 0600
 content: |
@@ -65,7 +72,7 @@ coreos:
 Type=oneshot
 RemainAfterExit=yes
 ExecStart=/usr/bin/bash /opt/bin/download-release.sh
-- name: master-apiserver.service
+- name: kube-apiserver.service
 command: start
 content: |
 [Unit]
@@ -77,10 +84,33 @@ coreos:
 Requires=download-release.service
 [Service]
 ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kube-apiserver /opt/bin/kube-apiserver
-ExecStart=/opt/bin/kube-apiserver --address=127.0.0.1 --port=8080 --etcd_servers=http://127.0.0.1:4001 --portal_net=PORTAL_NET --logtostderr=true --cloud_provider=rackspace --cloud_config=/etc/cloud.conf --v=2
+ExecStartPre=/usr/bin/mkdir -p /var/lib/kube-apiserver
+ExecStartPre=/usr/bin/cp /media/configdrive/openstack/content/0000 /var/lib/kube-apiserver/known_tokens.csv
+ExecStart=/opt/bin/kube-apiserver \
+  --address=127.0.0.1 \
+  --cloud_provider=rackspace \
+  --cloud_config=/etc/cloud.conf \
+  --etcd_servers=http://127.0.0.1:4001 \
+  --logtostderr=true \
+  --port=8080 \
+  --portal_net=PORTAL_NET \
+  --token-auth-file=/var/lib/kube-apiserver/known_tokens.csv \
+  --v=2
 Restart=always
-RestartSec=2
+RestartSec=5
-- name: master-controller-manager.service
+- name: apiserver-advertiser.service
+command: start
+content: |
+[Unit]
+Description=Kubernetes Apiserver Advertiser
+After=etcd.service
+Requires=etcd.service
+After=kube-apiserver.service
+[Service]
+ExecStart=/bin/sh -c 'etcdctl set /corekube/apiservers/$public_ipv4 $public_ipv4'
+Restart=always
+RestartSec=120
+- name: kube-controller-manager.service
 command: start
 content: |
 [Unit]
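The two new ExecStartPre lines work together with the `--file` flag added to `nova boot` in util.sh below: a file injected at boot appears under the config drive's content directory, and the unit copies the first such file into place for `--token-auth-file`. (The rendered page showed `After=master-apiserver.service` in the advertiser unit; since this commit renames that unit to kube-apiserver.service, the dependency is updated above.) A hedged way to verify the token file on a booted master, assuming the standard `config-2` drive label; not part of the commit:

    sudo mount -o ro /dev/disk/by-label/config-2 /media/configdrive || true
    ls /media/configdrive/openstack/content/          # expect: 0000
    cut -d, -f2 /var/lib/kube-apiserver/known_tokens.csv | sort -u
    # admin, kubelet, kube_proxy, system:controller_manager, system:dns, ...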
@@ -88,14 +118,19 @@ coreos:
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes
 After=network-online.target
 Requires=network-online.target
-After=master-apiserver.service
+After=kube-apiserver.service
-Requires=master-apiserver.service
+Requires=kube-apiserver.service
 [Service]
 ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kube-controller-manager /opt/bin/kube-controller-manager
-ExecStart=/opt/bin/kube-controller-manager --master=127.0.0.1:8080 --logtostderr=true --cloud_provider=rackspace --cloud_config=/etc/cloud.conf --v=2
+ExecStart=/opt/bin/kube-controller-manager \
+  --cloud_provider=rackspace \
+  --cloud_config=/etc/cloud.conf \
+  --logtostderr=true \
+  --master=127.0.0.1:8080 \
+  --v=2
 Restart=always
-RestartSec=2
+RestartSec=5
-- name: master-scheduler.service
+- name: kube-scheduler.service
 command: start
 content: |
 [Unit]
@@ -103,27 +138,33 @@ coreos:
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes
 After=network-online.target
 Requires=network-online.target
-After=master-apiserver.service
+After=kube-apiserver.service
-Requires=master-apiserver.service
+Requires=kube-apiserver.service
 [Service]
 ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kube-scheduler /opt/bin/kube-scheduler
-ExecStart=/opt/bin/kube-scheduler --master=127.0.0.1:8080 --logtostderr=true
+ExecStart=/opt/bin/kube-scheduler \
+  --logtostderr=true \
+  --master=127.0.0.1:8080
 Restart=always
-RestartSec=10
+RestartSec=5
-- name: master-register.service
+- name: kube-register.service
 command: start
 content: |
 [Unit]
 Description=Kubernetes Registration Service
 Documentation=https://github.com/kelseyhightower/kube-register
+Requires=kube-apiserver.service
+After=kube-apiserver.service
+Requires=fleet.service
+After=fleet.service
 [Service]
-ExecStartPre=/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes/kube-register
+ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-register -z /opt/bin/kube-register https://github.com/kelseyhightower/kube-register/releases/download/v0.0.3/kube-register-0.0.3-linux-amd64
 ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-register
 ExecStart=/opt/bin/kube-register \
---metadata=kubernetes_role=minion \
+  --api-endpoint=http://127.0.0.1:8080 \
   --fleet-endpoint=unix:///var/run/fleet.sock \
---api-endpoint=http://127.0.0.1:8080
+  --healthz-port=10248 \
+  --metadata=kubernetes_role=minion
 Restart=always
 RestartSec=10
 #Running nginx service with --net="host" is a necessary evil until running all k8s services in docker.
cluster/rackspace/cloud-config/minion-cloud-config.yaml

@@ -1,27 +1,62 @@
 #cloud-config

 write_files:
-- path: /opt/bin/kube-net-update.sh
+- path: /opt/bin/regen-apiserver-list.sh
 permissions: 0755
 content: |
 #!/bin/sh
-set -x -e
-nh=${ETCD_WATCH_KEY##*/}
-net=$ETCD_WATCH_VALUE
-case $ETCD_WATCH_ACTION in
-set) ip route replace $net via $nh dev eth2 metric 900 ;;
-expire) ip route del $net via $nh metric 900 ;;
-esac
+m=$(echo $(etcdctl ls --recursive /corekube/apiservers | cut -d/ -f4 | sort) | tr ' ' ,)
+mkdir -p /run/kubelet
+echo "APISERVER_IPS=$m" > /run/kubelet/apiservers.env
+echo "FIRST_APISERVER_URL=https://${m%%\,*}:6443" >> /run/kubelet/apiservers.env
 - path: /opt/bin/download-release.sh
 permissions: 0755
 content: |
 #!/bin/bash
+# This temp URL is only good for the length of time specified at cluster creation time.
+# Afterward, it will result in a 403.
 OBJECT_URL="CLOUD_FILES_URL"
-echo "Downloading release ($OBJECT_URL)"
-wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz
-echo "Unpacking release"
-rm -rf /opt/kubernetes || false
-tar xzf /opt/kubernetes.tar.gz -C /opt/
+if [ ! -s /opt/kubernetes.tar.gz ]
+then
+  echo "Downloading release ($OBJECT_URL)"
+  wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz
+  echo "Unpacking release"
+  rm -rf /opt/kubernetes || false
+  tar xzf /opt/kubernetes.tar.gz -C /opt/
+else
+  echo "kubernetes release found. Skipping download."
+fi
+- path: /run/setup-auth.sh
+permissions: 0755
+content: |
+#!/bin/bash -e
+set -x
+/usr/bin/mkdir -p /var/lib/kubelet
+/bin/echo "{\"BearerToken\": \"KUBELET_TOKEN\", \"Insecure\": true }" > /var/lib/kubelet/kubernetes_auth
+- path: /run/config-kube-proxy.sh
+permissions: 0755
+content: |
+#!/bin/bash -e
+set -x
+/usr/bin/mkdir -p /var/lib/kube-proxy
+cat > /var/lib/kube-proxy/kubeconfig << EOF
+apiVersion: v1
+kind: Config
+users:
+- name: kube-proxy
+  user:
+    token: KUBE_PROXY_TOKEN
+clusters:
+- name: local
+  cluster:
+    insecure-skip-tls-verify: true
+contexts:
+- context:
+    cluster: local
+    user: kube-proxy
+  name: service-account-context
+current-context: service-account-context
+EOF

 coreos:
 etcd:
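For clarity, the env file regen-apiserver-list.sh writes looks like the following once masters have registered under /corekube/apiservers (addresses are hypothetical):

    cat /run/kubelet/apiservers.env
    # APISERVER_IPS=10.208.0.3,10.208.0.7
    # FIRST_APISERVER_URL=https://10.208.0.3:6443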
@@ -43,6 +78,31 @@ coreos:
 command: start
 - name: fleet.service
 command: start
+- name: flanneld.service
+command: start
+drop-ins:
+- name: 50-network-config.conf
+content: |
+[Service]
+ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "KUBE_NETWORK", "Backend": {"Type": "host-gw"}}'
+ExecStart=
+ExecStart=/usr/libexec/sdnotify-proxy /run/flannel/sd.sock \
+  /usr/bin/docker run --net=host --privileged=true --rm \
+  --volume=/run/flannel:/run/flannel \
+  --env=NOTIFY_SOCKET=/run/flannel/sd.sock \
+  --env-file=/run/flannel/options.env \
+  --volume=${ETCD_SSL_DIR}:/etc/ssl/etcd:ro \
+  quay.io/coreos/flannel:${FLANNEL_VER} /opt/bin/flanneld -etcd-endpoints http://127.0.0.1:4001 --ip-masq=true --iface=eth2
+- name: docker.service
+command: start
+drop-ins:
+- name: 51-docker-mirror.conf
+content: |
+[Unit]
+# making sure that flanneld finished startup, otherwise containers
+# won't land in flannel's network...
+Requires=flanneld.service
+After=flanneld.service
 - name: download-release.service
 command: start
 content: |
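The 50-network-config.conf drop-in seeds flannel's etcd key before the daemon starts; host-gw mode then programs plain next-hop routes between nodes instead of an encapsulated overlay, which is why the hand-rolled net-advertiser/net-router units can be deleted below. A quick sanity check on a booted node (illustrative; the JSON assumes the KUBE_NETWORK default from config-default.sh):

    etcdctl get /coreos.com/network/config
    # { "Network": "10.240.0.0/16", "Backend": {"Type": "host-gw"}}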
@@ -54,7 +114,7 @@ coreos:
 Type=oneshot
 RemainAfterExit=yes
 ExecStart=/usr/bin/bash /opt/bin/download-release.sh
-- name: minion-kubelet.service
+- name: kubelet.service
 command: start
 content: |
 [Unit]
@@ -67,11 +127,21 @@ coreos:
 After=download-release.service
 Requires=download-release.service
 [Service]
+EnvironmentFile=/run/kubelet/apiservers.env
+ExecStartPre=/run/setup-auth.sh
 ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kubelet /opt/bin/kubelet
-ExecStart=/opt/bin/kubelet --address=$private_ipv4 --hostname_override=$private_ipv4 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true --config=/opt/kubernetes-manifests --cluster_dns=DNS_SERVER_IP --cluster_domain=DNS_DOMAIN
+ExecStart=/opt/bin/kubelet \
+  --address=$private_ipv4 \
+  --api_servers=${FIRST_APISERVER_URL} \
+  --cluster_dns=DNS_SERVER_IP \
+  --cluster_domain=DNS_DOMAIN \
+  --healthz-bind-address=$private_ipv4 \
+  --hostname_override=$private_ipv4 \
+  --logtostderr=true \
+  --v=2
 Restart=always
-RestartSec=2
+RestartSec=5
-- name: minion-proxy.service
+- name: kube-proxy.service
 command: start
 content: |
 [Unit]
@@ -84,46 +154,50 @@ coreos:
 After=download-release.service
 Requires=download-release.service
 [Service]
+EnvironmentFile=/run/kubelet/apiservers.env
+ExecStartPre=/run/config-kube-proxy.sh
 ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kube-proxy /opt/bin/kube-proxy
-ExecStart=/opt/bin/kube-proxy --bind_address=$private_ipv4 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true --v=2
+ExecStart=/opt/bin/kube-proxy \
+  --bind_address=$private_ipv4 \
+  --kubeconfig=/var/lib/kube-proxy/kubeconfig \
+  --logtostderr=true \
+  --master=${FIRST_APISERVER_URL}
 Restart=always
-RestartSec=2
+RestartSec=5
-- name: minion-advertiser.service
+- name: kubelet-sighup.path
+command: start
+content: |
+[Path]
+PathChanged=/run/kubelet/apiservers.env
+- name: kubelet-sighup.service
+command: start
+content: |
+[Service]
+ExecStart=/usr/bin/pkill -SIGHUP -f kubelet
+- name: kube-proxy-sighup.path
+command: start
+content: |
+[Path]
+PathChanged=/run/kubelet/apiservers.env
+- name: kube-proxy-sighup.service
+command: start
+content: |
+[Service]
+ExecStart=/usr/bin/pkill -SIGHUP -f kube-proxy
+- name: apiserver-finder.service
 command: start
 content: |
 [Unit]
-Description=Kubernetes Minion Advertiser
+Description=Kubernetes Apiserver finder
+After=network-online.target
+Requires=network-online.target
 After=etcd.service
 Requires=etcd.service
-After=minion-kubelet.service
 [Service]
-ExecStart=/bin/sh -c 'while :; do etcdctl set /corekube/minions/$private_ipv4 $private_ipv4 --ttl 300; sleep 120; done'
+ExecStartPre=/opt/bin/regen-apiserver-list.sh
+ExecStart=/usr/bin/etcdctl exec-watch --recursive /corekube/apiservers -- /opt/bin/regen-apiserver-list.sh
 Restart=always
-RestartSec=120
+RestartSec=30
-- name: net-advertiser.service
-command: start
-content: |
-[Unit]
-Description=Kubernetes Network Advertiser
-After=etcd.service
-Requires=etcd.service
-After=minion-kubelet.service
-[Service]
-ExecStart=/bin/sh -c 'eth2_ip=$$(ip -o -f inet a show dev eth2 | sed "s/.* inet \([0-9.]\+\).*/\1/"); while :; do etcdctl set /corekube/net/$$eth2_ip 10.240.INDEX.0/24 --ttl 300; sleep 120; done'
-Restart=always
-RestartSec=120
-- name: net-router.service
-command: start
-content: |
-[Unit]
-Description=Kubernetes Network Router
-After=etcd.service
-Requires=etcd.service
-After=minion-kubelet.service
-[Service]
-ExecStart=/usr/bin/etcdctl exec-watch --recursive /corekube/net -- /opt/bin/kube-net-update.sh
-Restart=always
-RestartSec=120
 - name: cbr0.netdev
 command: start
 content: |
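The sighup path/service pairs implement a crude reload chain: apiserver-finder rewrites apiservers.env whenever the etcd watch fires, the .path units trigger on that file change, and the matching services SIGHUP the daemons. Since neither binary installs a SIGHUP handler, the signal in effect stops the process and systemd's Restart=always brings it back up with the refreshed EnvironmentFile. A hand-run equivalent (illustrative only):

    /opt/bin/regen-apiserver-list.sh   # what apiserver-finder runs on each change
    pkill -SIGHUP -f kubelet           # what kubelet-sighup.service runs
    pkill -SIGHUP -f kube-proxy        # what kube-proxy-sighup.service runs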
@@ -149,41 +223,3 @@ coreos:
 ExecStart=/usr/sbin/iptables -t nat -A POSTROUTING -o eth1 -s 10.240.INDEX.0/24 -j MASQUERADE
 RemainAfterExit=yes
 Type=oneshot
-- name: docker.service
-command: start
-content: |
-[Unit]
-After=network.target
-Description=Docker Application Container Engine
-Documentation=http://docs.docker.io
-
-[Service]
-ExecStartPre=/bin/mount --make-rprivate /
-ExecStart=/usr/bin/docker -d -H fd:// -b cbr0 --iptables=false
-Restart=always
-RestartSec=30
-
-[Install]
-WantedBy=multi-user.target
-- name: format-data.service
-command: start
-content: |
-[Unit]
-Description=Formats data drive
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/sbin/wipefs -f /dev/xvde1
-ExecStart=/usr/sbin/mkfs.btrfs -f /dev/xvde1
-- name: var-lib-docker-volumes.mount
-command: start
-content: |
-[Unit]
-Description=Mount data drive to /var/lib/docker/volumes
-Requires=format-data.service
-After=format-data.service
-Before=docker.service
-[Mount]
-What=/dev/xvde1
-Where=/var/lib/docker/volumes
-Type=btrfs
cluster/rackspace/config-default.sh

@@ -19,7 +19,7 @@
 # KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_MINION_FLAVOR, NUM_MINIONS, NOVA_NETWORK and SSH_KEY_NAME

 # Shared
-KUBE_IMAGE="${KUBE_IMAGE-b63e1435-a46f-4726-b984-e3f15ae92753}" # CoreOS(Beta)
+KUBE_IMAGE="${KUBE_IMAGE-2c210e44-5149-4ae3-83d6-f855a4d28490}" # CoreOS(Beta)
 SSH_KEY_NAME="${SSH_KEY_NAME-id_kubernetes}"
 NOVA_NETWORK_LABEL="kubernetes-pool-net"
 NOVA_NETWORK_CIDR="${NOVA_NETWORK-192.168.0.0/24}"
@@ -35,7 +35,7 @@ KUBE_MINION_FLAVOR="${KUBE_MINION_FLAVOR-performance1-2}"
 RAX_NUM_MINIONS="${RAX_NUM_MINIONS-4}"
 MINION_TAG="tags=${INSTANCE_PREFIX}-minion"
 MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${RAX_NUM_MINIONS}}))
-KUBE_NETWORK=($(eval echo "10.240.{1..${RAX_NUM_MINIONS}}.0/24"))
+KUBE_NETWORK="10.240.0.0/16"
 PORTAL_NET="10.0.0.0/16"

 # Optional: Install node monitoring.
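This KUBE_NETWORK change is the pivot of the networking rework: instead of pre-assigning each minion a fixed 10.240.INDEX.0/24 and advertising routes by hand, the whole /16 is handed to flanneld, which leases per-node subnets itself. On a node the lease shows up in flannel's standard env file, roughly like this (hypothetical lease values):

    cat /run/flannel/subnet.env
    # FLANNEL_NETWORK=10.240.0.0/16
    # FLANNEL_SUBNET=10.240.63.1/24
    # FLANNEL_MTU=1500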
cluster/rackspace/util.sh

@@ -21,6 +21,7 @@
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}
 source "${KUBE_ROOT}/cluster/common.sh"
+source "${KUBE_ROOT}/cluster/rackspace/authorization.sh"

 verify-prereqs() {
 # Make sure that prerequisites are installed.
@@ -60,7 +61,7 @@ get-password() {
 get-kubeconfig-basicauth
 if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
 KUBE_USER=admin
-KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
+KUBE_PASSWORD=$(python2.7 -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
 fi
 }
@@ -106,7 +107,8 @@ find-object-url() {

 KUBE_TAR=${CLOUDFILES_CONTAINER}/${CONTAINER_PREFIX}/kubernetes-server-linux-amd64.tar.gz

-RELEASE_TMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET ${KUBE_TAR})
+# Create temp URL good for 24 hours
+RELEASE_TMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET ${KUBE_TAR} 86400)
 echo "cluster/rackspace/util.sh: Object temp URL:"
 echo -e "\t${RELEASE_TMP_URL}"
@@ -128,10 +130,29 @@ copy_dev_tarballs() {
 echo "cluster/rackspace/util.sh: Uploading to Cloud Files"
 ${SWIFTLY_CMD} put -i ${RELEASE_DIR}/kubernetes-server-linux-amd64.tar.gz \
   ${CLOUDFILES_CONTAINER}/${CONTAINER_PREFIX}/kubernetes-server-linux-amd64.tar.gz > /dev/null 2>&1

 echo "Release pushed."
 }

+prep_known_tokens() {
+  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+    generate_kubelet_tokens ${MINION_NAMES[i]}
+    cat ${KUBE_TEMP}/${MINION_NAMES[i]}_tokens.csv >> ${KUBE_TEMP}/known_tokens.csv
+  done
+
+  # Generate tokens for other "service accounts". Append to known_tokens.
+  #
+  # NB: If this list ever changes, this script actually has to
+  # change to detect the existence of this file, kill any deleted
+  # old tokens and add any new tokens (to handle the upgrade case).
+  local -r service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
+  for account in "${service_accounts[@]}"; do
+    echo "$(create_token),${account},${account}" >> ${KUBE_TEMP}/known_tokens.csv
+  done
+
+  generate_admin_token
+}
+
 rax-boot-master() {

 DISCOVERY_URL=$(curl https://discovery.etcd.io/new)
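Putting authorization.sh and prep_known_tokens together, the aggregate ${KUBE_TEMP}/known_tokens.csv handed to the master ends up shaped like this, assuming two minions; tokens abbreviated, illustrative only:

    # per-minion kubelet/kube_proxy pairs, then service accounts, then admin
    wgE3...,kubelet,kubelet
    bQ7p...,kube_proxy,kube_proxy
    Hm2k...,kubelet,kubelet
    fT9c...,kube_proxy,kube_proxy
    xR5v...,system:scheduler,system:scheduler
    ...
    aZ8w...,admin,admin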
@@ -159,6 +180,7 @@ rax-boot-master() {
 --meta ${MASTER_TAG} \
 --meta ETCD=${DISCOVERY_ID} \
 --user-data ${KUBE_TEMP}/master-cloud-config.yaml \
+--file /var/lib/kube-apiserver/known_tokens.csv=${KUBE_TEMP}/known_tokens.csv \
 --config-drive true \
 --nic net-id=${NETWORK_UUID} \
 ${MASTER_NAME}"
@@ -175,15 +197,20 @@ rax-boot-minions() {

 for (( i=0; i<${#MINION_NAMES[@]}; i++)); do

+get_tokens_from_csv ${MINION_NAMES[i]}
+
 sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \
-    -e "s|INDEX|$((i + 1))|g" \
     -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \
-    -e "s|ENABLE_NODE_MONITORING|${ENABLE_NODE_MONITORING:-false}|" \
-    -e "s|ENABLE_NODE_LOGGING|${ENABLE_NODE_LOGGING:-false}|" \
-    -e "s|LOGGING_DESTINATION|${LOGGING_DESTINATION:-}|" \
-    -e "s|ENABLE_CLUSTER_DNS|${ENABLE_CLUSTER_DNS:-false}|" \
     -e "s|DNS_SERVER_IP|${DNS_SERVER_IP:-}|" \
     -e "s|DNS_DOMAIN|${DNS_DOMAIN:-}|" \
+    -e "s|ENABLE_CLUSTER_DNS|${ENABLE_CLUSTER_DNS:-false}|" \
+    -e "s|ENABLE_NODE_MONITORING|${ENABLE_NODE_MONITORING:-false}|" \
+    -e "s|ENABLE_NODE_LOGGING|${ENABLE_NODE_LOGGING:-false}|" \
+    -e "s|INDEX|$((i + 1))|g" \
+    -e "s|KUBELET_TOKEN|${KUBELET_TOKEN}|" \
+    -e "s|KUBE_NETWORK|${KUBE_NETWORK}|" \
+    -e "s|KUBE_PROXY_TOKEN|${KUBE_PROXY_TOKEN}|" \
+    -e "s|LOGGING_DESTINATION|${LOGGING_DESTINATION:-}|" \
 $(dirname $0)/rackspace/cloud-config/minion-cloud-config.yaml > $KUBE_TEMP/minion-cloud-config-$(($i + 1)).yaml

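One subtlety worth calling out in the sed invocation above: `&` is special in a sed replacement (it expands to the matched text), and Cloud Files temp URLs carry `&` in their query string, hence the `${RELEASE_TMP_URL//&/\\&}` escaping. A standalone illustration with a made-up URL:

    url='https://storage.example.com/k8s.tar.gz?temp_url_sig=abc&temp_url_expires=123'
    echo 'OBJECT_URL="CLOUD_FILES_URL"' | sed -e "s|CLOUD_FILES_URL|${url//&/\\&}|"
    # OBJECT_URL="https://storage.example.com/k8s.tar.gz?temp_url_sig=abc&temp_url_expires=123"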
@@ -276,7 +303,7 @@ kube-up() {
 trap "rm -rf ${KUBE_TEMP}" EXIT

 get-password
-python $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $KUBE_USER $KUBE_PASSWORD
+python2.7 $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $KUBE_USER $KUBE_PASSWORD
 HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)

 rax-nova-network
@@ -286,6 +313,8 @@ kube-up() {
 rax-ssh-key

 echo "cluster/rackspace/util.sh: Starting Cloud Servers"
+prep_known_tokens
+
 rax-boot-master

 rax-boot-minions
docs/getting-started-guides/README.md

@@ -39,6 +39,7 @@ Docker Multi Node | Flannel| N/A | local | [docs](docker-multino
 Local | | | _none_ | [docs](../../docs/getting-started-guides/locally.md) | Community (@preillyme) |
 libvirt/KVM | CoreOS | CoreOS | libvirt/KVM | [docs](../../docs/getting-started-guides/libvirt-coreos.md) | Community (@lhuard1A) |
 oVirt | | | | [docs](../../docs/getting-started-guides/ovirt.md) | Community (@simon3z) |
+Rackspace | CoreOS | CoreOS | flannel | [docs](../../docs/getting-started-guides/rackspace.md) | Community (@doublerr) | use k8s version 0.16.2


 *Note*: The above table is ordered by version test/used in notes followed by support level.
docs/getting-started-guides/rackspace.md

@@ -1,25 +1,27 @@
-# Status: Out Of Date
-
-** Rackspace support is out of date. Please check back later **
-
 # Rackspace
-In general, the dev-build-and-up.sh workflow for Rackspace is the similar to GCE. The specific implementation is different due to the use of CoreOS, Rackspace Cloud Files and network design.
+
+* Supported Version: v0.16.2
+  * `git checkout v0.16.2`
+
+In general, the dev-build-and-up.sh workflow for Rackspace is similar to GCE. The specific implementation differs due to the use of CoreOS, Rackspace Cloud Files and the overall network design.
+
 These scripts should be used to deploy development environments for Kubernetes. If your account leverages RackConnect or non-standard networking, these scripts will most likely not work without modification.

-NOTE: The rackspace scripts do NOT rely on `saltstack`.
+NOTE: The rackspace scripts do NOT rely on `saltstack` and instead rely on cloud-init for configuration.

 The current cluster design is inspired by:
 - [corekube](https://github.com/metral/corekube/)
 - [Angus Lees](https://github.com/anguslees/kube-openstack/)

 ## Prerequisites
-1. You need to have both `nova` and `swiftly` installed. It's recommended to use a python virtualenv to install these packages into.
-2. Make sure you have the appropriate environment variables set to interact with the OpenStack APIs. See [Rackspace Documentation](http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/section_gs_install_nova.html) for more details.
+1. Python2.7
+2. You need to have both `nova` and `swiftly` installed. It's recommended to use a python virtualenv to install these packages into.
+3. Make sure you have the appropriate environment variables set to interact with the OpenStack APIs. See [Rackspace Documentation](http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/section_gs_install_nova.html) for more details.

 ## Provider: Rackspace
-- To use Rackspace as the provider, set the KUBERNETES_PROVIDER ENV variable:
-  `export KUBERNETES_PROVIDER=rackspace` and run the `bash hack/dev-build-and-up.sh` script.
+- To install the latest released version of kubernetes use `export KUBERNETES_PROVIDER=rackspace; wget -q -O - https://get.k8s.io | bash`
+- To build your own released version from source use `export KUBERNETES_PROVIDER=rackspace` and run `bash hack/dev-build-and-up.sh`

 ## Build
 1. The kubernetes binaries will be built via the common build scripts in `build/`.
|
|||||||
3. The built `kubernetes-server-linux-amd64.tar.gz` will be uploaded to this container and the URL will be passed to master/minions nodes when booted.
|
3. The built `kubernetes-server-linux-amd64.tar.gz` will be uploaded to this container and the URL will be passed to master/minions nodes when booted.
|
||||||
|
|
||||||
## Cluster
|
## Cluster
|
||||||
1. There is a specific `cluster/rackspace` directory with the scripts for the following steps:
|
There is a specific `cluster/rackspace` directory with the scripts for the following steps:
|
||||||
2. A cloud network will be created and all instances will be attached to this network. We will connect the master API and minion kubelet service via this network.
|
1. A cloud network will be created and all instances will be attached to this network.
|
||||||
3. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines since we won't capture the password.
|
- flanneld uses this network for next hop routing. These routes allow the containers running on each node to communicate with one another on this private network.
|
||||||
4. A master and minions will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems.
|
2. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines since we won't capture the password.
|
||||||
5. We then boot as many minions as defined via `$RAX_NUM_MINIONS`.
|
3. The master server and additional nodes will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems.
|
||||||
|
4. We then boot as many nodes as defined via `$RAX_NUM_MINIONS`.
|
||||||
|
|
||||||
## Some notes:
|
## Some notes:
|
||||||
- The scripts expect `eth2` to be the cloud network that the containers will communicate across.
|
- The scripts expect `eth2` to be the cloud network that the containers will communicate across.
|
||||||
- A number of the items in `config-default.sh` are overridable via environment variables.
|
- A number of the items in `config-default.sh` are overridable via environment variables.
|
||||||
- For older versions please either:
|
- For older versions please either:
|
||||||
|
* Sync back to `v0.9` with `git checkout v0.9`
|
||||||
|
* Download a [snapshot of `v0.9`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.9.tar.gz)
|
||||||
* Sync back to `v0.3` with `git checkout v0.3`
|
* Sync back to `v0.3` with `git checkout v0.3`
|
||||||
* Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz)
|
* Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz)
|
||||||
|
|
||||||
## Network Design
|
## Network Design
|
||||||
- eth0 - Public Interface used for servers/containers to reach the internet
|
- eth0 - Public Interface used for servers/containers to reach the internet
|
||||||
|