Merge pull request #3970 from justinsb/fix_aws
aws cluster/kube-up.sh fixes

Commit: db416c4f39

@@ -18,7 +18,7 @@
 ZONE=us-west-2
 MASTER_SIZE=t2.micro
 MINION_SIZE=t2.micro
-NUM_MINIONS=4
+NUM_MINIONS=${NUM_MINIONS:-4}
 
 # This is the ubuntu 14.04 image for us-west-2 + ebs
 # See here: http://cloud-images.ubuntu.com/locator/ec2/ for other images
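
NUM_MINIONS now uses bash default-value expansion: a value already set in the caller's environment wins, and 4 is only the fallback. A minimal sketch of the behavior:

    # Keep the caller's value if set and non-empty; otherwise default to 4.
    NUM_MINIONS=${NUM_MINIONS:-4}
    echo "Cluster will have ${NUM_MINIONS} minions"

    # NUM_MINIONS=10 ./kube-up.sh  -> 10 minions
    # ./kube-up.sh                 -> 4 minions
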
@@ -20,15 +20,33 @@
 
 mkdir -p /srv/salt-overlay/pillar
 cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
-node_instance_prefix: $NODE_INSTANCE_PREFIX
-portal_net: $PORTAL_NET
-enable_node_monitoring: $ENABLE_NODE_MONITORING
-enable_node_logging: $ENABLE_NODE_LOGGING
-logging_destination: $LOGGING_DESTINATION
-enable_cluster_dns: $ENABLE_CLUSTER_DNS
-dns_server: $DNS_SERVER_IP
-dns_domain: $DNS_DOMAIN
+node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
+portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
+enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
+enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'
+enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
+enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
+logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
+elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
+enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
+dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
+dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
+dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
 EOF
 
 mkdir -p /srv/salt-overlay/salt/nginx
 echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
+
+# Generate and distribute a shared secret (bearer token) to
+# apiserver and kubelet so that kubelet can authenticate to
+# apiserver to send events.
+# This works on CoreOS, so it should work on a lot of distros.
+kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
+
+mkdir -p /srv/salt-overlay/salt/kube-apiserver
+known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
+(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file)
+
+mkdir -p /srv/salt-overlay/salt/kubelet
+kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
+(umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
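
Two things are worth noting in this hunk. First, every pillar value is now emitted as a YAML single-quoted scalar, with `sed -e "s/'/''/g"` doubling any embedded single quotes (`''` is YAML's escape for `'` inside a single-quoted string), so values containing spaces or punctuation can no longer corrupt cluster-params.sls. A sketch of the idea, using a helper name of our own that is not in the PR:

    #!/bin/bash
    # yaml_quote: render a shell value as a YAML single-quoted scalar.
    yaml_quote() {
      echo "'$(echo "$1" | sed -e "s/'/''/g")'"
    }

    yaml_quote "it's 10.0.0.0/16"   # -> 'it''s 10.0.0.0/16'

Second, the new kubelet bearer token is derived from /dev/urandom: base64 makes the random bytes printable, `tr -d "=+/"` strips the padding and symbol characters that would be awkward in the CSV token file, and `dd bs=32 count=1` truncates the result to 32 characters.
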
@@ -213,6 +213,24 @@ EOF
   chmod 0600 "$file"
 }
 
+# Adds a tag to an AWS resource
+# usage: add-tag <resource-id> <tag-name> <tag-value>
+function add-tag {
+  echo "Adding tag to ${1}: ${2}=${3}"
+
+  # We need to retry in case the resource isn't yet fully created
+  sleep 3
+  n=0
+  until [ $n -ge 5 ]; do
+    $AWS_CMD create-tags --resources ${1} --tags Key=${2},Value=${3} > $LOG && return
+    n=$[$n+1]
+    sleep 15
+  done
+
+  echo "Unable to add tag to AWS resource"
+  exit 1
+}
+
 function kube-up {
   find-release-tars
   upload-server-tars
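
add-tag exists because EC2 resource creation is asynchronous: a create-tags call issued right after run-instances or create-vpc can fail because the resource is not yet visible, so the function sleeps, then retries up to 5 times at 15-second intervals. (As an aside, `$[$n+1]` is deprecated bash arithmetic; the modern equivalent is `$((n+1))`.) The pattern generalizes to any flaky cloud call; a sketch with an illustrative helper name, not part of the PR:

    # retry <max-attempts> <delay-seconds> <command...>
    function retry {
      local max=$1 delay=$2 n=0
      shift 2
      until [ "$n" -ge "$max" ]; do
        "$@" && return 0        # success: stop retrying
        n=$((n+1))
        sleep "$delay"
      done
      echo "Failed after $max attempts: $*" >&2
      return 1
    }

    # retry 5 15 $AWS_CMD create-tags --resources $VPC_ID --tags Key=Name,Value=kubernetes-vpc
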
@@ -243,8 +261,7 @@ function kube-up {
     VPC_ID=$($AWS_CMD create-vpc --cidr-block 172.20.0.0/16 | json_val '["Vpc"]["VpcId"]')
     $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-support '{"Value": true}' > $LOG
     $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-hostnames '{"Value": true}' > $LOG
-    $AWS_CMD create-tags --resources $VPC_ID --tags Key=Name,Value=kubernetes-vpc > $LOG
-
+    add-tag $VPC_ID Name kubernetes-vpc
   fi
 
   echo "Using VPC $VPC_ID"
@@ -322,10 +339,13 @@ function kube-up {
     --security-group-ids $SEC_GROUP_ID \
     --associate-public-ip-address \
     --user-data file://${KUBE_TEMP}/master-start.sh | json_val '["Instances"][0]["InstanceId"]')
-  sleep 3
-  $AWS_CMD create-tags --resources $master_id --tags Key=Name,Value=$MASTER_NAME > $LOG
-  sleep 3
-  $AWS_CMD create-tags --resources $master_id --tags Key=Role,Value=$MASTER_TAG > $LOG
+  add-tag $master_id Name $MASTER_NAME
+  add-tag $master_id Role $MASTER_TAG
+
+  echo "Waiting 1 minute for master to be ready"
+  # TODO(justinsb): Actually poll for the master being ready
+  # (we at least need the salt-master to be up before the minions come up)
+  sleep 60
 
   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
     echo "Starting Minion (${MINION_NAMES[$i]})"
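
The fixed `sleep 60` is acknowledged as a stopgap by the TODO. One way the polling could eventually look — purely a sketch, not part of this change, and assuming the master's address is available in a variable such as `$KUBE_MASTER_IP` — is to probe the salt-master publish port (4505) until it answers:

    # Sketch only: wait for salt-master instead of sleeping a fixed 60s.
    for (( attempt=0; attempt<30; attempt++ )); do
      if nc -z -w 2 "$KUBE_MASTER_IP" 4505 2>/dev/null; then
        break                   # salt-master is accepting connections
      fi
      sleep 10
    done
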
@@ -346,21 +366,9 @@ function kube-up {
       --security-group-ids $SEC_GROUP_ID \
       --associate-public-ip-address \
       --user-data file://${KUBE_TEMP}/minion-start-${i}.sh | json_val '["Instances"][0]["InstanceId"]')
-    sleep 3
-    n=0
-    until [ $n -ge 5 ]; do
-      $AWS_CMD create-tags --resources $minion_id --tags Key=Name,Value=${MINION_NAMES[$i]} > $LOG && break
-      n=$[$n+1]
-      sleep 15
-    done
 
-    sleep 3
-    n=0
-    until [ $n -ge 5 ]; do
-      $AWS_CMD create-tags --resources $minion_id --tags Key=Role,Value=$MINION_TAG > $LOG && break
-      n=$[$n+1]
-      sleep 15
-    done
+    add-tag $minion_id Name ${MINION_NAMES[$i]}
+    add-tag $minion_id Role $MINION_TAG
 
     sleep 3
     $AWS_CMD modify-instance-attribute --instance-id $minion_id --source-dest-check '{"Value": false}' > $LOG
@@ -397,7 +405,8 @@ function kube-up {
 
   # Wait 3 minutes for cluster to come up. We hit it with a "highstate" after that to
   # make sure that everything is well configured.
-  echo "Waiting for cluster to settle"
+  # TODO: Can we poll here?
+  echo "Waiting 3 minutes for cluster to settle"
   local i
   for (( i=0; i < 6*3; i++)); do
     printf "."
@@ -24,7 +24,9 @@ etcd-tar:
     - source_hash: {{ etcd_tar_hash }}
     - archive_format: tar
     - if_missing: /usr/local/src/etcd-{{ etcd_version }}-linux-amd64
+{% if grains['saltversioninfo'] <= (2014, 7, 0, 0) %}
     - tar_options: xz
+{% endif %}
   file.directory:
     - name: /usr/local/src/etcd-{{ etcd_version }}-linux-amd64
     - user: root
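
The `saltversioninfo` grain is a version tuple such as `(2014, 7, 0, 0)`, so the Jinja `<=` is an ordinary lexicographic tuple comparison; the explicit `tar_options: xz` is now applied only on Salt 2014.7.0 and older, presumably because newer Salt handles the archive's extraction options itself.
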
@@ -27,6 +27,10 @@
 {% if grains.cloud == 'gce' -%}
   {% set cloud_provider = "--cloud_provider=gce" -%}
 {% endif -%}
+{% if grains.cloud == 'aws' -%}
+  {% set cloud_provider = "--cloud_provider=aws" -%}
+  {% set cloud_config = "--cloud_config=/etc/aws.conf" -%}
+{% endif -%}
 {% endif -%}
 
 {% if pillar['portal_net'] is defined -%}
@@ -40,7 +44,7 @@
 {% set token_auth_file = "--token_auth_file=/dev/null" -%}
 
 {% if grains.cloud is defined -%}
-{% if grains.cloud == 'gce' or grains.cloud == 'vagrant' -%}
+{% if grains.cloud in [ 'aws', 'gce', 'vagrant' ] -%}
   # TODO: generate and distribute tokens for other cloud providers.
   {% set token_auth_file = "--token_auth_file=/srv/kubernetes/known_tokens.csv" -%}
 {% endif -%}
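
Switching from chained `==` checks to Jinja's `in` membership test keeps the provider list in one place: adding the next provider becomes a one-word change instead of another `or` clause.
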
@@ -51,4 +55,4 @@
 {% set admission_control = "--admission_control=" + grains.admission_control -%}
 {% endif -%}
 
-DAEMON_ARGS="{{daemon_args}} {{address}} {{etcd_servers}} {{ cloud_provider }} {{admission_control}} --allow_privileged={{pillar['allow_privileged']}} {{portal_net}} {{cert_file}} {{key_file}} {{secure_port}} {{token_auth_file}} {{publicAddressOverride}} {{pillar['log_level']}}"
+DAEMON_ARGS="{{daemon_args}} {{address}} {{etcd_servers}} {{ cloud_provider }} {{ cloud_config }} {{admission_control}} --allow_privileged={{pillar['allow_privileged']}} {{portal_net}} {{cert_file}} {{key_file}} {{secure_port}} {{token_auth_file}} {{publicAddressOverride}} {{pillar['log_level']}}"
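
DAEMON_ARGS now carries `{{ cloud_config }}` alongside `{{ cloud_provider }}`. Only the aws branch above assigns it, so on other providers it presumably renders as an empty string (either initialized empty earlier in the template, as cloud_provider is, or via Jinja's default undefined) and the flag simply drops out of the argument list.
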
@@ -39,7 +39,7 @@
 {% endif %}
 
 {% if grains.cloud is defined %}
-{% if grains.cloud == 'gce' or grains.cloud == 'vagrant' %}
+{% if grains.cloud in ['aws', 'gce', 'vagrant'] %}
 # TODO: generate and distribute tokens on other cloud providers.
 /srv/kubernetes/known_tokens.csv:
   file.managed:
@@ -26,7 +26,7 @@
   {% set cloud_provider = "--cloud_provider=aws" -%}
   {% set cloud_config = "--cloud_config=/etc/aws.conf" -%}
   {% set minion_regexp = "" -%}
-  {% set machines = "--machines " + ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) -%}
+  {% set machines = "--machines=" + ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) -%}
 {% endif -%}
 {% if grains.cloud == 'azure' -%}
   MACHINES="{{ salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') }}"
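
The change from `--machines ` to `--machines=` fuses the flag and its value into a single shell word, which avoids mis-parsing when the assembled argument string is later word-split by the service's startup script.
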
@@ -32,7 +32,7 @@ import (
 // TestKubeletSendsEvent checks that kubelets and scheduler send events about pods scheduling and running.
 func TestKubeletSendsEvent(c *client.Client) bool {
 	provider := testContext.provider
-	if len(provider) > 0 && provider != "gce" && provider != "gke" {
+	if len(provider) > 0 && provider != "gce" && provider != "gke" && provider != "aws" {
 		glog.Infof("skipping TestKubeletSendsEvent on cloud provider %s", provider)
 		return true
 	}
@@ -29,8 +29,8 @@ import (
 // with the TestBasicImage test. This test is only supported
 // for the providers GCE and GKE.
 func TestPrivate(c *client.Client) bool {
-	if testContext.provider != "gce" && testContext.provider != "gke" {
-		glog.Infof("Skipping test private which is only supported for providers gce and gke (not %s)", testContext.provider)
+	if testContext.provider != "gce" && testContext.provider != "gke" && testContext.provider != "aws" {
+		glog.Infof("Skipping test private which is only supported for providers gce, gke and aws (not %s)", testContext.provider)
 		return true
 	}
 	glog.Info("Calling out to TestBasic")
|
Loading…
Reference in New Issue
Block a user