diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh
index 930d5d6da18..c5cd509a456 100644
--- a/cluster/aws/config-default.sh
+++ b/cluster/aws/config-default.sh
@@ -18,7 +18,7 @@
 ZONE=us-west-2
 MASTER_SIZE=t2.micro
 MINION_SIZE=t2.micro
-NUM_MINIONS=4
+NUM_MINIONS=${NUM_MINIONS:-4}
 
 # This is the ubuntu 14.04 image for us-west-2 + ebs
 # See here: http://cloud-images.ubuntu.com/locator/ec2/ for other images
diff --git a/cluster/aws/templates/create-dynamic-salt-files.sh b/cluster/aws/templates/create-dynamic-salt-files.sh
index f9a01c41a9b..5440da94331 100644
--- a/cluster/aws/templates/create-dynamic-salt-files.sh
+++ b/cluster/aws/templates/create-dynamic-salt-files.sh
@@ -20,15 +20,33 @@
 mkdir -p /srv/salt-overlay/pillar
 cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
-node_instance_prefix: $NODE_INSTANCE_PREFIX
-portal_net: $PORTAL_NET
-enable_node_monitoring: $ENABLE_NODE_MONITORING
-enable_node_logging: $ENABLE_NODE_LOGGING
-logging_destination: $LOGGING_DESTINATION
-enable_cluster_dns: $ENABLE_CLUSTER_DNS
-dns_server: $DNS_SERVER_IP
-dns_domain: $DNS_DOMAIN
+node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
+portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
+enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
+enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'
+enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
+enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
+logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
+elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
+enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
+dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
+dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
+dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
 EOF
 
 mkdir -p /srv/salt-overlay/salt/nginx
 echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
+
+# Generate and distribute a shared secret (bearer token) to
+# apiserver and kubelet so that kubelet can authenticate to
+# apiserver to send events.
+# This works on CoreOS, so it should work on a lot of distros.
+kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
+
+mkdir -p /srv/salt-overlay/salt/kube-apiserver
+known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
+(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file)
+
+mkdir -p /srv/salt-overlay/salt/kubelet
+kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
+(umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh
index e2b0a6e727e..07b27cd4b68 100644
--- a/cluster/aws/util.sh
+++ b/cluster/aws/util.sh
@@ -213,6 +213,24 @@
 EOF
   chmod 0600 "$file"
 }
+# Adds a tag to an AWS resource
+# usage: add-tag <resource-id> <tag-name> <tag-value>
+function add-tag {
+  echo "Adding tag to ${1}: ${2}=${3}"
+
+  # We need to retry in case the resource isn't yet fully created
+  sleep 3
+  n=0
+  until [ $n -ge 5 ]; do
+    $AWS_CMD create-tags --resources ${1} --tags Key=${2},Value=${3} > $LOG && return
+    n=$[$n+1]
+    sleep 15
+  done
+
+  echo "Unable to add tag to AWS resource"
+  exit 1
+}
+
 function kube-up {
   find-release-tars
   upload-server-tars
@@ -243,8 +261,7 @@ function kube-up {
     VPC_ID=$($AWS_CMD create-vpc --cidr-block 172.20.0.0/16 | json_val '["Vpc"]["VpcId"]')
     $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-support '{"Value": true}' > $LOG
     $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-hostnames '{"Value": true}' > $LOG
-    $AWS_CMD create-tags --resources $VPC_ID --tags Key=Name,Value=kubernetes-vpc > $LOG
-
+    add-tag $VPC_ID Name kubernetes-vpc
   fi
 
   echo "Using VPC $VPC_ID"
@@ -322,10 +339,13 @@ function kube-up {
     --security-group-ids $SEC_GROUP_ID \
     --associate-public-ip-address \
     --user-data file://${KUBE_TEMP}/master-start.sh | json_val '["Instances"][0]["InstanceId"]')
-  sleep 3
-  $AWS_CMD create-tags --resources $master_id --tags Key=Name,Value=$MASTER_NAME > $LOG
-  sleep 3
-  $AWS_CMD create-tags --resources $master_id --tags Key=Role,Value=$MASTER_TAG > $LOG
+  add-tag $master_id Name $MASTER_NAME
+  add-tag $master_id Role $MASTER_TAG
+
+  echo "Waiting 1 minute for master to be ready"
+  # TODO(justinsb): Actually poll for the master being ready
+  # (we at least need the salt-master to be up before the minions come up)
+  sleep 60
 
   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
     echo "Starting Minion (${MINION_NAMES[$i]})"
@@ -346,21 +366,9 @@ function kube-up {
       --security-group-ids $SEC_GROUP_ID \
       --associate-public-ip-address \
       --user-data file://${KUBE_TEMP}/minion-start-${i}.sh | json_val '["Instances"][0]["InstanceId"]')
-    sleep 3
-    n=0
-    until [ $n -ge 5 ]; do
-      $AWS_CMD create-tags --resources $minion_id --tags Key=Name,Value=${MINION_NAMES[$i]} > $LOG && break
-      n=$[$n+1]
-      sleep 15
-    done
-    sleep 3
-    n=0
-    until [ $n -ge 5 ]; do
-      $AWS_CMD create-tags --resources $minion_id --tags Key=Role,Value=$MINION_TAG > $LOG && break
-      n=$[$n+1]
-      sleep 15
-    done
+    add-tag $minion_id Name ${MINION_NAMES[$i]}
+    add-tag $minion_id Role $MINION_TAG
 
     sleep 3
     $AWS_CMD modify-instance-attribute --instance-id $minion_id --source-dest-check '{"Value": false}' > $LOG
@@ -397,7 +405,8 @@
 
   # Wait 3 minutes for cluster to come up. We hit it with a "highstate" after that to
   # make sure that everything is well configured.
-  echo "Waiting for cluster to settle"
+  # TODO: Can we poll here?
+  echo "Waiting 3 minutes for cluster to settle"
   local i
   for (( i=0; i < 6*3; i++)); do
     printf "."
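Aside (not part of the patch): the quoting pattern used for the pillar values in create-dynamic-salt-files.sh above wraps each value in single quotes and doubles any embedded single quote, which is the escape for a single-quoted YAML scalar. A minimal sketch of the same idea, using a hypothetical helper name:

yaml_quote() {
  # Wrap the value in single quotes; '' is how a literal quote is written
  # inside a single-quoted YAML scalar, so Salt parses the pillar safely.
  echo "'$(echo "${1}" | sed -e "s/'/''/g")'"
}
yaml_quote "it's a test"   # prints: 'it''s a test'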
diff --git a/cluster/saltbase/salt/etcd/init.sls b/cluster/saltbase/salt/etcd/init.sls
index 71ccfdcce84..e767be609d6 100644
--- a/cluster/saltbase/salt/etcd/init.sls
+++ b/cluster/saltbase/salt/etcd/init.sls
@@ -24,7 +24,9 @@ etcd-tar:
     - source_hash: {{ etcd_tar_hash }}
     - archive_format: tar
     - if_missing: /usr/local/src/etcd-{{ etcd_version }}-linux-amd64
+{% if grains['saltversioninfo'] <= (2014, 7, 0, 0) %}
     - tar_options: xz
+{% endif %}
   file.directory:
     - name: /usr/local/src/etcd-{{ etcd_version }}-linux-amd64
     - user: root
diff --git a/cluster/saltbase/salt/kube-apiserver/default b/cluster/saltbase/salt/kube-apiserver/default
index f40131fd1b6..7cb662ba649 100644
--- a/cluster/saltbase/salt/kube-apiserver/default
+++ b/cluster/saltbase/salt/kube-apiserver/default
@@ -27,6 +27,10 @@
 {% if grains.cloud == 'gce' -%}
   {% set cloud_provider = "--cloud_provider=gce" -%}
 {% endif -%}
+{% if grains.cloud == 'aws' -%}
+  {% set cloud_provider = "--cloud_provider=aws" -%}
+  {% set cloud_config = "--cloud_config=/etc/aws.conf" -%}
+{% endif -%}
 {% endif -%}
@@ -40,7 +44,7 @@ {% if pillar['portal_net'] is defined -%}
 {% set token_auth_file = "--token_auth_file=/dev/null" -%}
 {% if grains.cloud is defined -%}
-{% if grains.cloud == 'gce' or grains.cloud == 'vagrant' -%}
+{% if grains.cloud in [ 'aws', 'gce', 'vagrant' ] -%}
   # TODO: generate and distribute tokens for other cloud providers.
   {% set token_auth_file = "--token_auth_file=/srv/kubernetes/known_tokens.csv" -%}
 {% endif -%}
@@ -51,4 +55,4 @@
   {% set admission_control = "--admission_control=" + grains.admission_control -%}
 {% endif -%}
 
-DAEMON_ARGS="{{daemon_args}} {{address}} {{etcd_servers}} {{ cloud_provider }} {{admission_control}} --allow_privileged={{pillar['allow_privileged']}} {{portal_net}} {{cert_file}} {{key_file}} {{secure_port}} {{token_auth_file}} {{publicAddressOverride}} {{pillar['log_level']}}"
+DAEMON_ARGS="{{daemon_args}} {{address}} {{etcd_servers}} {{ cloud_provider }} {{ cloud_config }} {{admission_control}} --allow_privileged={{pillar['allow_privileged']}} {{portal_net}} {{cert_file}} {{key_file}} {{secure_port}} {{token_auth_file}} {{publicAddressOverride}} {{pillar['log_level']}}"
diff --git a/cluster/saltbase/salt/kube-apiserver/init.sls b/cluster/saltbase/salt/kube-apiserver/init.sls
index 61323833dc4..b829d449669 100644
--- a/cluster/saltbase/salt/kube-apiserver/init.sls
+++ b/cluster/saltbase/salt/kube-apiserver/init.sls
@@ -39,7 +39,7 @@
 {% endif %}
 
 {% if grains.cloud is defined %}
-{% if grains.cloud == 'gce' or grains.cloud == 'vagrant' %}
+{% if grains.cloud in ['aws', 'gce', 'vagrant'] %}
 # TODO: generate and distribute tokens on other cloud providers.
 /srv/kubernetes/known_tokens.csv:
   file.managed:
diff --git a/cluster/saltbase/salt/kube-controller-manager/default b/cluster/saltbase/salt/kube-controller-manager/default
index 185d01324b3..c5136546ebe 100644
--- a/cluster/saltbase/salt/kube-controller-manager/default
+++ b/cluster/saltbase/salt/kube-controller-manager/default
@@ -26,7 +26,7 @@
   {% set cloud_provider = "--cloud_provider=aws" -%}
   {% set cloud_config = "--cloud_config=/etc/aws.conf" -%}
   {% set minion_regexp = "" -%}
-  {% set machines = "--machines " + ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) -%}
+  {% set machines = "--machines=" + ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) -%}
 {% endif -%}
 {% if grains.cloud == 'azure' -%}
   MACHINES="{{ salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') }}"
diff --git a/test/e2e/kubelet_sends_events.go b/test/e2e/kubelet_sends_events.go
index b0e8b4b98ca..2b9da6b428c 100644
--- a/test/e2e/kubelet_sends_events.go
+++ b/test/e2e/kubelet_sends_events.go
@@ -32,7 +32,7 @@ import (
 // TestKubeletSendsEvent checks that kubelets and scheduler send events about pods scheduling and running.
 func TestKubeletSendsEvent(c *client.Client) bool {
 	provider := testContext.provider
-	if len(provider) > 0 && provider != "gce" && provider != "gke" {
+	if len(provider) > 0 && provider != "gce" && provider != "gke" && provider != "aws" {
 		glog.Infof("skipping TestKubeletSendsEvent on cloud provider %s", provider)
 		return true
 	}
diff --git a/test/e2e/private.go b/test/e2e/private.go
index baa721e507d..94076f69beb 100644
--- a/test/e2e/private.go
+++ b/test/e2e/private.go
@@ -29,8 +29,8 @@ import (
 // with the TestBasicImage test. This test is only supported
 // for the providers GCE and GKE.
 func TestPrivate(c *client.Client) bool {
-	if testContext.provider != "gce" && testContext.provider != "gke" {
-		glog.Infof("Skipping test private which is only supported for providers gce and gke (not %s)", testContext.provider)
+	if testContext.provider != "gce" && testContext.provider != "gke" && testContext.provider != "aws" {
+		glog.Infof("Skipping test private which is only supported for providers gce, gke and aws (not %s)", testContext.provider)
 		return true
 	}
 	glog.Info("Calling out to TestBasic")
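Aside (not part of the patch): the token generated in create-dynamic-salt-files.sh is the same value on both sides — the apiserver reads it from the file passed via --token_auth_file, and the kubelet reads it from /srv/salt-overlay/salt/kubelet/kubernetes_auth. A client holding that token authenticates with a standard bearer header; a rough sketch follows, where MASTER_IP, the secure port 6443, and the API path are illustrative assumptions rather than values taken from the patch:

# Sketch only: pull the token (first CSV field) and call the apiserver's
# secure port with it. The -k flag mirrors the "Insecure": true setting
# written to the kubelet's kubernetes_auth file.
token=$(cut -d, -f1 /srv/salt-overlay/salt/kube-apiserver/known_tokens.csv)
curl -k -H "Authorization: Bearer ${token}" "https://${MASTER_IP}:6443/api/v1beta1/pods"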