Get Vagrant to start using TLS client certs.

Also fix up cert generation. It was failing during the first Salt highstate because it tried to chown the certs before the apiserver user existed. Fix this by creating a 'kube-cert' group and chgrp'ing the cert files to it, then making the apiserver user a member of that group.

Fixes #2365
Fixes #2368
This commit is contained in:
Joe Beda 2014-11-13 22:14:56 -08:00
parent 7a6743808a
commit 5a0159ea00
11 changed files with 61 additions and 31 deletions

View File

@ -83,16 +83,9 @@ fi
# When we are using vagrant it has hard coded auth. We repeat that here so that # When we are using vagrant it has hard coded auth. We repeat that here so that
# we don't clobber auth that might be used for a publicly facing cluster. # we don't clobber auth that might be used for a publicly facing cluster.
if [ "$KUBERNETES_PROVIDER" == "vagrant" ]; then if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
cat >~/.kubernetes_vagrant_auth <<EOF
{
"User": "vagrant",
"Password": "vagrant"
}
EOF
auth_config=( auth_config=(
"-auth" "$HOME/.kubernetes_vagrant_auth" "-auth" "$HOME/.kubernetes_vagrant_auth"
"-insecure_skip_tls_verify"
) )
else else
auth_config=() auth_config=()

View File

@ -84,15 +84,8 @@ fi
# When we are using vagrant it has hard coded auth. We repeat that here so that # When we are using vagrant it has hard coded auth. We repeat that here so that
# we don't clobber auth that might be used for a publicly facing cluster. # we don't clobber auth that might be used for a publicly facing cluster.
if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
cat >~/.kubernetes_vagrant_auth <<EOF
{
"User": "vagrant",
"Password": "vagrant"
}
EOF
auth_config=( auth_config=(
"--auth-path=$HOME/.kubernetes_vagrant_auth" "--auth-path=$HOME/.kubernetes_vagrant_auth"
"--insecure-skip-tls-verify=true"
) )
else else
auth_config=() auth_config=()

View File

@ -44,6 +44,8 @@ apiserver:
user.present: user.present:
- system: True - system: True
- gid_from_name: True - gid_from_name: True
- groups:
- kube-cert
- shell: /sbin/nologin - shell: /sbin/nologin
- home: /var/apiserver - home: /var/apiserver
- require: - require:

View File

@ -6,7 +6,7 @@
{% set cert_ip='_use_aws_external_ip_' %} {% set cert_ip='_use_aws_external_ip_' %}
{% endif %} {% endif %}
{% if grains.cloud == 'vagrant' %} {% if grains.cloud == 'vagrant' %}
{% set cert_ip=grains.fqdn_ip4 %} {% set cert_ip=grains.ip_interfaces.eth1[0] %}
{% endif %} {% endif %}
{% if grains.cloud == 'vsphere' %} {% if grains.cloud == 'vsphere' %}
{% set cert_ip=grains.ip_interfaces.eth0[0] %} {% set cert_ip=grains.ip_interfaces.eth0[0] %}
@ -23,6 +23,10 @@
{% set certgen="make-ca-cert.sh" %} {% set certgen="make-ca-cert.sh" %}
{% endif %} {% endif %}
kube-cert:
group.present:
- system: True
kubernetes-cert: kubernetes-cert:
cmd.script: cmd.script:
- unless: test -f /srv/kubernetes/server.cert - unless: test -f /srv/kubernetes/server.cert

View File

@ -20,7 +20,7 @@ set -o pipefail
cert_ip=$1 cert_ip=$1
cert_dir=/srv/kubernetes cert_dir=/srv/kubernetes
cert_file_owner=apiserver.apiserver cert_group=kube-cert
mkdir -p "$cert_dir" mkdir -p "$cert_dir"
@ -63,4 +63,5 @@ cp -p pki/ca.crt "${cert_dir}/ca.crt"
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt" cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key" cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
# Make server certs accessible to apiserver. # Make server certs accessible to apiserver.
chown $cert_file_owner "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.cert" chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"

View File

@ -15,11 +15,12 @@
# limitations under the License. # limitations under the License.
cert_dir=/srv/kubernetes cert_dir=/srv/kubernetes
cert_file_owner=apiserver.apiserver cert_group=kube-cert
mkdir -p "$cert_dir" mkdir -p "$cert_dir"
openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \ openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
-subj "/CN=kubernetes.invalid/O=Kubernetes" \ -subj "/CN=kubernetes.invalid/O=Kubernetes" \
-keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert" -keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert"
chown $cert_file_owner "${cert_dir}/server.key" "${cert_dir}/server.cert" chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert"

View File

@ -1,14 +1,6 @@
nginx: nginx:
pkg: pkg:
- installed - installed
service:
- running
- watch:
- pkg: nginx
- file: /etc/nginx/nginx.conf
- file: /etc/nginx/sites-enabled/default
- file: /usr/share/nginx/htpasswd
- cmd: kubernetes-cert
/etc/nginx/nginx.conf: /etc/nginx/nginx.conf:
file: file:
@ -36,3 +28,13 @@ nginx:
- group: root - group: root
- mode: 644 - mode: 644
nginx-service:
service:
- running
- name: nginx
- watch:
- pkg: nginx
- file: /etc/nginx/nginx.conf
- file: /etc/nginx/sites-enabled/default
- file: /usr/share/nginx/htpasswd
- cmd: kubernetes-cert

View File

@ -70,6 +70,7 @@ grains:
master_ip: $MASTER_IP master_ip: $MASTER_IP
network_mode: openvswitch network_mode: openvswitch
etcd_servers: $MASTER_IP etcd_servers: $MASTER_IP
cloud: vagrant
cloud_provider: vagrant cloud_provider: vagrant
roles: roles:
- kubernetes-master - kubernetes-master
@ -78,6 +79,7 @@ EOF
mkdir -p /srv/salt-overlay/pillar mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
portal_net: $PORTAL_NET portal_net: $PORTAL_NET
cert_ip: $MASTER_IP
EOF EOF
# Configure the salt-master # Configure the salt-master

View File

@ -43,6 +43,29 @@ function kube-up {
get-password get-password
vagrant up vagrant up
local kube_cert=".kubecfg.vagrant.crt"
local kube_key=".kubecfg.vagrant.key"
local ca_cert=".kubernetes.vagrant.ca.crt"
(umask 077
vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
cat << EOF > ~/.kubernetes_vagrant_auth
{
"User": "$KUBE_USER",
"Password": "$KUBE_PASSWORD",
"CAFile": "$HOME/$ca_cert",
"CertFile": "$HOME/$kube_cert",
"KeyFile": "$HOME/$kube_key"
}
EOF
chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
"${HOME}/${kube_key}" "${HOME}/${ca_cert}"
)
echo "Each machine instance has been created." echo "Each machine instance has been created."
echo " Now waiting for the Salt provisioning process to complete on each machine." echo " Now waiting for the Salt provisioning process to complete on each machine."
echo " This can take some time based on your network, disk, and cpu speed." echo " This can take some time based on your network, disk, and cpu speed."
@ -108,7 +131,7 @@ function kube-up {
echo echo
echo " https://${KUBE_MASTER_IP}" echo " https://${KUBE_MASTER_IP}"
echo echo
echo "The user name and password to use is located in ~/.kubernetes_auth." echo "The user name and password to use is located in ~/.kubernetes_vagrant_auth."
echo echo
} }

View File

@ -52,7 +52,7 @@ The following enumerates the set of defined key/value pairs that are supported t
Key | Value Key | Value
------------- | ------------- ------------- | -------------
`cbr-cidr` | (Optional) The minion IP address range used for the docker container bridge. `cbr-cidr` | (Optional) The minion IP address range used for the docker container bridge.
`cloud` | (Optional) Which IaaS platform is used to host kubernetes, *gce*, *azure* `cloud` | (Optional) Which IaaS platform is used to host kubernetes, *gce*, *azure*, *aws*, *vagrant*
`cloud_provider` | (Optional) The cloud_provider used by apiserver: *gce*, *azure*, *vagrant* `cloud_provider` | (Optional) The cloud_provider used by apiserver: *gce*, *azure*, *vagrant*
`etcd_servers` | (Optional) Comma-delimited list of IP addresses the apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role. `etcd_servers` | (Optional) Comma-delimited list of IP addresses the apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role.
`hostnamef` | (Optional) The full host name of the machine, i.e. hostname -f `hostnamef` | (Optional) The full host name of the machine, i.e. hostname -f

View File

@ -63,5 +63,14 @@ locations=(
) )
e2e=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 ) e2e=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
# When we are using vagrant it has hard coded auth. We repeat that here so that
# we don't clobber auth that might be used for a publicly facing cluster.
if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
auth_config=(
"--auth_config=$HOME/.kubernetes_vagrant_auth"
)
else
auth_config=()
fi
"${e2e}" -host="https://${KUBE_MASTER_IP-}" "${e2e}" "${auth_config[@]:+${auth_config[@]}}" -host="https://${KUBE_MASTER_IP-}"