Merge pull request #10378 from jeffbean/ansible-final
Ansible rework for Kubernetes cluster deployment
@@ -4,13 +4,15 @@ This playbook helps you to set up a Kubernetes cluster on machines where you
 can't or don't want to use the salt scripts and cluster up/down tools. They
 can be real hardware, VMs, things in a public cloud, etc.
 
-## Usage
+## Before starting
 
-* Record the IP address of which machine you want to be your master
-* Record the IP address of the machine you want to be your etcd server (often same as master)
-* Record the IP addresses of the machines you want to be your minions. (master can be a minion)
+* Record the IP address/hostname of the machine you want to be your master (only a single master is supported)
+* Record the IP address/hostname of the machine you want to be your etcd server (often the same as the master, and only one)
+* Record the IP addresses/hostnames of the machines you want to be your nodes (the master can also be a node)
 
-Stick the system information into the 'inventory' file.
+### Configure the inventory file
+
+Stick the system information gathered above into the 'inventory' file.
 
 ### Configure your cluster
@@ -22,24 +24,31 @@ in full detail.
 
 Now run the setup:
 
-$ ansible-playbook -i inventory cluster.yml
+`$ ./setup.sh`
 
 In general this will work on very recent Fedora, rawhide or F21. Future work to
 support RHEL7, CentOS, and possibly other distros should be forthcoming.
 
 ### You can just set up certain parts instead of doing it all
 
-Only the kubernetes daemons:
-
-$ ansible-playbook -i inventory kubernetes-services.yml
-
 Only etcd:
 
-$ ansible-playbook -i inventory etcd.yml
+`$ ./setup.sh --tags=etcd`
+
+Only the kubernetes master:
+
+`$ ./setup.sh --tags=masters`
+
+Only the kubernetes nodes:
+
+`$ ./setup.sh --tags=nodes`
+
+### You may override the inventory file by doing
+
+`INVENTORY=myinventory ./setup.sh`
 
 Only flannel:
 
-$ ansible-playbook -i inventory flannel.yml
+`$ ./setup.sh --tags=flannel`
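setup.sh itself does not appear in this capture, so as a rough sketch only: given the flags documented above, the wrapper presumably amounts to something like the following (the variable handling here is an assumption for illustration, not the PR's actual script):

    #!/bin/bash
    # Hypothetical sketch of setup.sh: honor the INVENTORY override and pass
    # remaining flags (e.g. --tags=etcd) straight through to ansible-playbook.
    INVENTORY=${INVENTORY:-inventory}
    ansible-playbook -i "${INVENTORY}" cluster.yml "$@"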
@@ -1,4 +1,52 @@
-# Set up a whole working cluster!
-- include: etcd.yml
-- include: kubernetes-services.yml
+---
+- hosts: all
+  gather_facts: false
+  sudo: yes
+  roles:
+    - pre-ansible
+  tags:
+    - pre-ansible
+
+# Install etcd
+- hosts: etcd
+  sudo: yes
+  roles:
+    - etcd
+  tags:
+    - etcd
+
+# install flannel
+- hosts:
+    - etcd
+    - masters
+    - nodes
+  sudo: yes
+  roles:
+    - flannel
+  tags:
+    - flannel
+
+# install kube master services
+- hosts: masters
+  sudo: yes
+  roles:
+    - master
+  tags:
+    - masters
+
+# launch addons, like dns
+- hosts: masters
+  sudo: yes
+  roles:
+    - kubernetes-addons
+  tags:
+    - addons
+    - dns
+
+# install kubernetes on the nodes
+- hosts: nodes
+  sudo: yes
+  roles:
+    - node
+  tags:
+    - nodes
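Since every play above carries tags, the README's setup.sh flags map one-to-one onto ansible-playbook's own --tags option, so subsets of the cluster can also be converged directly:

    $ ansible-playbook -i inventory cluster.yml --tags=etcd,flannel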
(deleted file: the etcd.yml playbook previously included from cluster.yml)
@@ -1,6 +0,0 @@
----
-- hosts: etcd
-  sudo: yes
-  roles:
-    - common
-    - etcd
@@ -1,7 +1,11 @@
+# Only used for the location to store flannel info in etcd, but may be used
+# for dns purposes and cluster id purposes in the future.
+cluster_name: kube.local
+
 # Account name of remote user. Ansible will use this user account to ssh into
 # the managed machines. The user must be able to use sudo without asking
 # for password unless ansible_sudo_pass is set
-ansible_ssh_user: root
+#ansible_ssh_user: root
 
 # password for the ansible_ssh_user. If this is unset you will need to set up
 # ssh keys so a password is not needed.
@@ -10,8 +14,50 @@
 # If a password is needed to sudo to root that password must be set here
 #ansible_sudo_pass: password
 
+# A list of insecure registries you might need to define
+insecure_registrys:
+#  - "gcr.io"
+
+# If you need a proxy for the docker daemon define these here
+#http_proxy: "http://proxy.example.com:3128"
+#https_proxy: "http://proxy.example.com:3128"
+
 # Kubernetes internal network for services.
 # Kubernetes services will get fake IP addresses from this range.
 # This range must not conflict with anything in your infrastructure. These
 # addresses do not need to be routable and must just be an unused block of space.
 kube_service_addresses: 10.254.0.0/16
+
+# Flannel internal network (optional). When flannel is used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+flannel_subnet: 172.16.0.0
+
+# Flannel internal network total size (optional). This is the prefix of the
+# entire flannel overlay network. So the entirety of 172.16.0.0/12 must be
+# unused in your environment.
+flannel_prefix: 12
+
+# Flannel internal network (optional). This is the size allocation that flannel
+# will give to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+flannel_host_prefix: 24
+
+# Set this variable to 'false' to disable the whole DNS configuration.
+dns_setup: true
+# How many replicas in the Replication Controller
+dns_replicas: 1
+
+# Internal DNS domain name.
+# This domain must not be used in your network. Services will be discoverable
+# under <service-name>.<namespace>.<domainname>, e.g.
+# myservice.default.kube.local
+dns_domain: kube.local
+
+# IP address of the DNS server.
+# Kubernetes will create a pod with several containers, serving as the DNS
+# server and expose it under this IP address. The IP address must be from
+# the range specified as kube_service_addresses above.
+# And this is the IP address you should use as address of the DNS server
+# in your containers.
+dns_server: 10.254.0.10
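A quick sanity check of the flannel sizing comment above: a /12 overlay carved into /24 per-host subnets yields 2^(24-12) = 4096 node subnets, each with 256 - 2 = 254 usable pod addresses. In shell arithmetic:

    $ echo $(( 2 ** (24 - 12) ))   # node subnets available with these defaults
    4096
    $ echo $(( 256 - 2 ))          # pod IPs per /24, minus network and broadcast
    254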
@@ -1,10 +1,10 @@
 [masters]
-10.0.0.1
+kube-master-test-01.example.com
 
 [etcd]
-10.0.0.2
+kube-master-test-01.example.com
 
-[minions]
-10.0.0.3
-10.0.0.4
-10.0.0.5
+[nodes]
+kube-minion-test-01.example.com
+kube-minion-test-02.example.com
(deleted file: the kubernetes-services.yml playbook previously included from cluster.yml)
@@ -1,17 +0,0 @@
----
-- hosts: masters:minions
-  sudo: yes
-  roles:
-    - common
-
-- hosts: masters
-  sudo: yes
-  roles:
-    - kubernetes
-    - master
-
-- hosts: minions
-  sudo: yes
-  roles:
-    - kubernetes
-    - minion
contrib/ansible/roles/common/files/virt7-testing.repo (new file, 5 lines)

[virt7-testing]
name=virt7-testing
baseurl=http://cbs.centos.org/repos/virt7-testing/x86_64/os/
enabled=0
gpgcheck=0
contrib/ansible/roles/common/tasks/centos.yml (new file, 3 lines)

---
- name: CentOS | Install Testing centos7 repo for new tool versions
  copy: src=virt7-testing.repo dest=/etc/yum.repos.d/virt7-testing.repo
contrib/ansible/roles/common/tasks/fedora-install.yml (new file, 7 lines)

---
- name: Generic | Install Firewalld Python2 Package
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: python-firewall
    state: latest
  when: ansible_distribution_major_version|int >= 22
@@ -13,6 +13,26 @@
     is_atomic: true
   when: s.stat.exists
 
+- name: Determine if host has rpm
+  stat: path=/usr/bin/rpm
+  register: s
+  changed_when: false
+
+- name: Init the has_rpm fact
+  set_fact:
+    has_rpm: false
+
+- name: Set the has_rpm fact
+  set_fact:
+    has_rpm: true
+  when: s.stat.exists
+
 # collect information about what packages are installed
 - include: rpm.yml
-  when: ansible_pkg_mgr == "yum"
+  when: has_rpm
+
+- include: centos.yml
+  when: ansible_distribution == "CentOS"
+
+- include: fedora-install.yml
+  when: not is_atomic and ansible_distribution == "Fedora"
@@ -1,5 +1,5 @@
 ---
-- name: Determine if firewalld installed
+- name: RPM | Determine if firewalld installed
   command: "rpm -q firewalld"
   register: s
   changed_when: false
contrib/ansible/roles/docker/defaults/main.yml (new file, 1 line)

no_proxy: "localhost,127.0.0.0/8,::1,/var/run/docker.sock"
contrib/ansible/roles/docker/handlers/main.yml (new file, 3 lines)

---
- name: restart docker
  service: name=docker state=restarted
contrib/ansible/roles/docker/tasks/generic-install.yml (new file, 6 lines)

---
- name: Generic | Install Docker
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: docker
    state: latest
contrib/ansible/roles/docker/tasks/main.yml (new file, 35 lines)

---
- include: generic-install.yml
  when: not is_atomic

- name: Turn down docker logging
  lineinfile: dest=/etc/sysconfig/docker regexp=^OPTIONS= line=OPTIONS="--selinux-enabled --log-level=warn"
  notify:
    - restart docker

- name: Install http_proxy into docker-network
  lineinfile: dest=/etc/sysconfig/docker-network regexp=^HTTP_PROXY= line=HTTP_PROXY="{{ http_proxy }}"
  when: http_proxy is defined
  notify:
    - restart docker

- name: Install https_proxy into docker-network
  lineinfile: dest=/etc/sysconfig/docker-network regexp=^HTTPS_PROXY= line=HTTPS_PROXY="{{ https_proxy }}"
  when: https_proxy is defined
  notify:
    - restart docker

- name: Install no-proxy into docker-network
  lineinfile: dest=/etc/sysconfig/docker-network regexp=^NO_PROXY= line=NO_PROXY="{{ no_proxy }}"
  when: no_proxy is defined
  notify:
    - restart docker

- name: Add any insecure registrys to docker config
  lineinfile: dest=/etc/sysconfig/docker regexp=^INSECURE_REGISTRY= line=INSECURE_REGISTRY='{% for reg in insecure_registrys %}--insecure-registry="{{ reg }}" {% endfor %}'
  when: insecure_registrys is defined and insecure_registrys
  notify:
    - restart docker

- name: Enable Docker
  service: name=docker enabled=yes state=started
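For reference, if the proxy variables in group_vars/all.yml were uncommented with their example values, the lineinfile tasks above would leave /etc/sysconfig/docker-network looking roughly like this (an illustrative rendering, not a file in the PR):

    HTTP_PROXY="http://proxy.example.com:3128"
    HTTPS_PROXY="http://proxy.example.com:3128"
    NO_PROXY="localhost,127.0.0.0/8,::1,/var/run/docker.sock"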
@@ -1,6 +1,7 @@
 ---
 - name: restart etcd
   service: name=etcd state=restarted
+  when: etcd_started.changed == false
 
 - name: Save iptables rules
   command: service iptables save
contrib/ansible/roles/etcd/meta/main.yml (new file, 3 lines)

---
dependencies:
  - { role: common }
@@ -1,16 +1,16 @@
 ---
 - name: Open firewalld port for etcd
   firewalld: port={{ item }}/tcp permanent=false state=enabled
-  # in case this is also a minion where firewalld is turned off
+  # in case this is also a node where firewalld is turned off
   ignore_errors: yes
   with_items:
     - 2379
     - 2380
 
 - name: Save firewalld port for etcd
   firewalld: port={{ item }}/tcp permanent=true state=enabled
-  # in case this is also a minion where firewalld is turned off
+  # in case this is also a node where firewalld is turned off
   ignore_errors: yes
   with_items:
     - 2379
     - 2380
@@ -1,6 +1,6 @@
 ---
 - name: Get iptables rules
-  shell: iptables -L
+  command: iptables -L
   register: iptablesrules
   always_run: yes
@@ -11,7 +11,7 @@
   command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "etcd"
   when: "'etcd' not in iptablesrules.stdout"
   notify:
     - Save iptables rules
   with_items:
     - 2379
     - 2380
@@ -1,17 +1,24 @@
 ---
 - name: Install etcd
-  yum: pkg=etcd state=latest
+  action: "{{ ansible_pkg_mgr }}"
+  args:
+    name: etcd
+    state: latest
   notify:
     - restart etcd
   when: not is_atomic
 
 - name: Write etcd config file
   template: src=etcd.conf.j2 dest=/etc/etcd/etcd.conf
   notify:
     - restart etcd
 
 - name: Enable etcd
-  service: name=etcd enabled=yes state=started
+  service: name=etcd enabled=yes
+
+- name: Start etcd
+  service: name=etcd state=started
+  register: etcd_started
 
 - include: firewalld.yml
   when: has_firewalld
|
@@ -2,3 +2,4 @@
|
|||||||
ETCD_NAME=default
|
ETCD_NAME=default
|
||||||
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
|
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
|
||||||
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
|
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
|
||||||
|
ETCD_ADVERTISE_CLIENT_URLS="http://0.0.0.0:2379"
|
||||||
|
contrib/ansible/roles/flannel/handlers/main.yml (new file, 18 lines)

---
- name: restart flannel
  service: name=flanneld state=restarted
  notify:
    - stop docker
    - delete docker0
    - start docker
  when: inventory_hostname in groups['nodes']

- name: stop docker
  service: name=docker state=stopped

- name: delete docker0
  command: ip link delete docker0
  ignore_errors: yes

- name: start docker
  service: name=docker state=started
contrib/ansible/roles/flannel/meta/main.yml (new file, 3 lines)

---
dependencies:
  - { role: common }
contrib/ansible/roles/flannel/tasks/client.yml (new file, 17 lines)

---
- name: Install flannel
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: flannel
    state: latest
  when: not is_atomic

- name: Install Flannel config file
  template: src=flanneld.j2 dest=/etc/sysconfig/flanneld
  notify:
    - restart flannel

- name: Launch Flannel
  service: name=flanneld state=started enabled=yes
  notify:
    - restart flannel
contrib/ansible/roles/flannel/tasks/config.yml (new file, 23 lines)

---
- name: Set facts about etcdctl command
  set_fact:
    peers: "{% for hostname in groups['etcd'] %}http://{{ hostname }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
    conf_file: "/tmp/flannel-conf.json"
    conf_loc: "/{{ cluster_name }}/network/config"
  run_once: true
  delegate_to: "{{ groups['etcd'][0] }}"

- name: Create flannel config file to go in etcd
  template: src=flannel-conf.json.j2 dest={{ conf_file }}
  run_once: true
  delegate_to: "{{ groups['etcd'][0] }}"

- name: Load the flannel config file into etcd
  shell: "/usr/bin/etcdctl --no-sync --peers={{ peers }} set {{ conf_loc }} < {{ conf_file }}"
  run_once: true
  delegate_to: "{{ groups['etcd'][0] }}"

- name: Clean up the flannel config file
  file: path={{ conf_file }} state=absent
  run_once: true
  delegate_to: "{{ groups['etcd'][0] }}"
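The net effect of config.yml is a single key in etcd that every flanneld reads at startup. With the defaults from group_vars/all.yml and the example inventory above, the stored configuration could be checked like this (illustrative; exact output framing depends on the etcdctl version):

    $ etcdctl --no-sync --peers=http://kube-master-test-01.example.com:2379 \
        get /kube.local/network/config
    { "Network": "172.16.0.0/12", "SubnetLen": 24, "Backend": { "Type": "vxlan" } }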
contrib/ansible/roles/flannel/tasks/main.yml (new file, 5 lines)

---
- include: config.yml

- include: client.yml
  when: inventory_hostname in groups['masters'] + groups['nodes']

flannel-conf.json.j2 (new file, 1 line; the template referenced by config.yml above)

{ "Network": "{{ flannel_subnet }}/{{ flannel_prefix }}", "SubnetLen": {{ flannel_host_prefix }}, "Backend": { "Type": "vxlan" } }
contrib/ansible/roles/flannel/templates/flanneld.j2 (new file, 11 lines)

# Flanneld configuration options

# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD="{% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"

# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_KEY="/{{ cluster_name }}/network"

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
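Rendered against the example inventory and the cluster_name above, this template would produce roughly (illustrative):

    FLANNEL_ETCD="http://kube-master-test-01.example.com:2379"
    FLANNEL_ETCD_KEY="/kube.local/network"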
kubernetes-addons role defaults (new file, 1 line)

local_temp_addon_dir: /tmp/kubernetes/addons
contrib/ansible/roles/kubernetes-addons/files/kube-addon-update.sh (new executable file, 445 lines)

#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The business logic for whether a given object should be created
# was already enforced by salt, and /etc/kubernetes/addons is the
# managed result of that. Start everything below that directory.

# Parameters
# $1 path to add-ons

# LIMITATIONS
# 1. controllers are not updated unless their name is changed
# 3. Services will not be updated unless their name is changed,
#    but for services we actually want updates without name change.
# 4. Json files are not handled at all. Currently addons must be
#    in yaml files
# 5. exit code is probably not always correct (I haven't checked
#    carefully if it works in 100% cases)
# 6. There are no unit tests
# 8. Will not work if the total length of paths to addons is greater than
#    bash can handle. Probably it is not a problem: ARG_MAX=2097152 on GCE.
# 9. Performance issue: yaml files are read many times in a single execution.

# cosmetic improvements to be done
# 1. improve the log function; add timestamp, file name, etc.
# 2. logging doesn't work from files that print things out.
# 3. kubectl prints the output to stderr (the output should be captured and then
#    logged)

# global config
KUBECTL=${TEST_KUBECTL:-}    # substitute for tests
KUBECTL=${KUBECTL:-${KUBECTL_BIN:-}}
KUBECTL=${KUBECTL:-/usr/local/bin/kubectl}
NUM_TRIES_FOR_CREATE=${TEST_NUM_TRIES:-100}
DELAY_AFTER_CREATE_ERROR_SEC=${TEST_DELAY_AFTER_ERROR_SEC:=10}
NUM_TRIES_FOR_STOP=${TEST_NUM_TRIES:-100}
DELAY_AFTER_STOP_ERROR_SEC=${TEST_DELAY_AFTER_ERROR_SEC:=10}

if [[ ! -x ${KUBECTL} ]]; then
    echo "ERROR: kubectl command (${KUBECTL}) not found or is not executable" 1>&2
    exit 1
fi

# remember that you can't log from functions that print some output (because
# logs are also printed on stdout)
# $1 level
# $2 message
function log() {
  # manage log levels manually here

  # add the timestamp if you find it useful
  case $1 in
    DB3 )
#        echo "$1: $2"
        ;;
    DB2 )
#        echo "$1: $2"
        ;;
    DBG )
#        echo "$1: $2"
        ;;
    INFO )
        echo "$1: $2"
        ;;
    WRN )
        echo "$1: $2"
        ;;
    ERR )
        echo "$1: $2"
        ;;
    * )
        echo "INVALID_LOG_LEVEL $1: $2"
        ;;
  esac
}

#$1 yaml file path
function get-object-kind-from-file() {
    # prints to stdout, so log cannot be used
    #WARNING: only yaml is supported
    cat $1 | python -c '''
try:
    import pipes,sys,yaml
    y = yaml.load(sys.stdin)
    labels = y["metadata"]["labels"]
    if ("kubernetes.io/cluster-service", "true") not in labels.iteritems():
        # all add-ons must have the label "kubernetes.io/cluster-service".
        # Otherwise we are ignoring them (the update will not work anyway)
        print "ERROR"
    else:
        print y["kind"]
except Exception, ex:
    print "ERROR"
'''
}

# $1 yaml file path
function get-object-name-from-file() {
    # prints to stdout, so log cannot be used
    #WARNING: only yaml is supported
    cat $1 | python -c '''
try:
    import pipes,sys,yaml
    y = yaml.load(sys.stdin)
    labels = y["metadata"]["labels"]
    if ("kubernetes.io/cluster-service", "true") not in labels.iteritems():
        # all add-ons must have the label "kubernetes.io/cluster-service".
        # Otherwise we are ignoring them (the update will not work anyway)
        print "ERROR"
    else:
        print y["metadata"]["name"]
except Exception, ex:
    print "ERROR"
'''
}

# $1 addon directory path
# $2 addon type (e.g. ReplicationController)
# echoes the string with paths to files containing addon for the given type
# works only for yaml files (!) (ignores json files)
function get-addons-from-disk() {
    # prints to stdout, so log cannot be used
    local -r addon_dir=$1
    local -r obj_type=$2
    local kind
    local file_path
    for file_path in $(find ${addon_dir} -name \*.yaml); do
        kind=$(get-object-kind-from-file ${file_path})
        # WARNING: assumption that the topmost indentation is zero (I'm not sure yaml allows for topmost indentation)
        if [[ "${kind}" == "${obj_type}" ]]; then
            echo ${file_path}
        fi
    done
}

# waits for all subprocesses
# returns 0 if all of them were successful and 1 otherwise
function wait-for-jobs() {
    local rv=0
    local pid
    for pid in $(jobs -p); do
        wait ${pid} || (rv=1; log ERR "error in pid ${pid}")
        log DB2 "pid ${pid} completed, current error code: ${rv}"
    done
    return ${rv}
}

function run-until-success() {
    local -r command=$1
    local tries=$2
    local -r delay=$3
    local -r command_name=$1
    while [ ${tries} -gt 0 ]; do
        log DBG "executing: '$command'"
        # let's give the command as an argument to bash -c, so that we can use
        # && and || inside the command itself
        /bin/bash -c "${command}" && \
            log DB3 "== Successfully executed ${command_name} at $(date -Is) ==" && \
            return 0
        let tries=tries-1
        log INFO "== Failed to execute ${command_name} at $(date -Is). ${tries} tries remaining. =="
        sleep ${delay}
    done
    return 1
}

# $1 object type
function get-addons-from-server() {
    local -r obj_type=$1
    "${KUBECTL}" get "${obj_type}" -o template -t "{{range.items}}{{.metadata.name}} {{end}}" --api-version=v1beta3 -l kubernetes.io/cluster-service=true
}

# returns the characters after the last separator (including)
# If the separator is empty or if it doesn't appear in the string,
# an empty string is printed
# $1 input string
# $2 separator (must be single character, or empty)
function get-suffix() {
    # prints to stdout, so log cannot be used
    local -r input_string=$1
    local -r separator=$2
    local suffix

    if [[ "${separator}" == "" ]]; then
        echo ""
        return
    fi

    if [[ "${input_string}" == *"${separator}"* ]]; then
        suffix=$(echo "${input_string}" | rev | cut -d "${separator}" -f1 | rev)
        echo "${separator}${suffix}"
    else
        echo ""
    fi
}

# returns the characters up to the last '-' (without it)
# $1 input string
# $2 separator
function get-basename() {
    # prints to stdout, so log cannot be used
    local -r input_string=$1
    local -r separator=$2
    local suffix
    suffix="$(get-suffix ${input_string} ${separator})"
    # this will strip the suffix (if matches)
    echo ${input_string%$suffix}
}

function stop-object() {
    local -r obj_type=$1
    local -r obj_name=$2
    log INFO "Stopping ${obj_type} ${obj_name}"
    run-until-success "${KUBECTL} stop ${obj_type} ${obj_name}" ${NUM_TRIES_FOR_STOP} ${DELAY_AFTER_STOP_ERROR_SEC}
}

function create-object() {
    local -r obj_type=$1
    local -r file_path=$2
    log INFO "Creating new ${obj_type} from file ${file_path}"
    run-until-success "${KUBECTL} create -f ${file_path}" ${NUM_TRIES_FOR_CREATE} ${DELAY_AFTER_CREATE_ERROR_SEC}
}

function update-object() {
    local -r obj_type=$1
    local -r obj_name=$2
    local -r file_path=$3
    log INFO "updating the ${obj_type} ${obj_name} with the new definition ${file_path}"
    stop-object ${obj_type} ${obj_name}
    create-object ${obj_type} ${file_path}
}

# deletes the objects from the server
# $1 object type
# $2 a list of object names
function stop-objects() {
    local -r obj_type=$1
    local -r obj_names=$2
    local obj_name
    for obj_name in ${obj_names}; do
        stop-object ${obj_type} ${obj_name} &
    done
}

# creates objects from the given files
# $1 object type
# $2 a list of paths to definition files
function create-objects() {
    local -r obj_type=$1
    local -r file_paths=$2
    local file_path
    for file_path in ${file_paths}; do
        create-object ${obj_type} ${file_path} &
    done
}

# updates objects
# $1 object type
# $2 a list of update specifications
# each update specification is a ';' separated pair: <object name>;<file path>
function update-objects() {
    local -r obj_type=$1
    local -r update_spec=$2
    local objdesc
    for objdesc in ${update_spec}; do
        IFS=';' read -a array <<< ${objdesc}
        update-object ${obj_type} ${array[0]} ${array[1]} &
    done
}

# Global variables set by function match-objects.
for_delete=""   # a list of object names to be deleted
for_update=""   # a list of pairs <obj_name>;<filePath> for objects that should be updated
for_ignore=""   # a list of object names that can be ignored
new_files=""    # a list of file paths that weren't matched by any existing objects (these objects must be created now)

# $1 path to files with objects
# $2 object type in the API (ReplicationController or Service)
# $3 name separator (single character or empty)
function match-objects() {
    local -r addon_dir=$1
    local -r obj_type=$2
    local -r separator=$3

    # output variables (globals)
    for_delete=""
    for_update=""
    for_ignore=""
    new_files=""

    addon_names_on_server=$(get-addons-from-server "${obj_type}")
    addon_paths_in_files=$(get-addons-from-disk "${addon_dir}" "${obj_type}")

    log DB2 "addon_names_on_server=${addon_names_on_server}"
    log DB2 "addon_paths_in_files=${addon_paths_in_files}"

    local matched_files=""

    local basename_on_server=""
    local name_on_server=""
    local suffix_on_server=""
    local name_from_file=""
    local suffix_from_file=""
    local found=0
    local addon_path=""

    for name_on_server in ${addon_names_on_server}; do
        basename_on_server=$(get-basename ${name_on_server} ${separator})
        suffix_on_server="$(get-suffix ${name_on_server} ${separator})"

        log DB3 "Found existing addon ${name_on_server}, basename=${basename_on_server}"

        # check if the addon is present in the directory and decide
        # what to do with it
        # this is not optimal because we're reading the files over and over
        # again. But for small number of addons it doesn't matter so much.
        found=0
        for addon_path in ${addon_paths_in_files}; do
            name_from_file=$(get-object-name-from-file ${addon_path})
            if [[ "${name_from_file}" == "ERROR" ]]; then
                log INFO "Cannot read object name from ${addon_path}. Ignoring"
                continue
            else
                log DB2 "Found object name '${name_from_file}' in file ${addon_path}"
            fi
            suffix_from_file="$(get-suffix ${name_from_file} ${separator})"

            log DB3 "matching: ${basename_on_server}${suffix_from_file} == ${name_from_file}"
            if [[ "${basename_on_server}${suffix_from_file}" == "${name_from_file}" ]]; then
                log DB3 "matched existing ${obj_type} ${name_on_server} to file ${addon_path}; suffix_on_server=${suffix_on_server}, suffix_from_file=${suffix_from_file}"
                found=1
                matched_files="${matched_files} ${addon_path}"
                if [[ "${suffix_on_server}" == "${suffix_from_file}" ]]; then
                    for_ignore="${for_ignore} ${name_from_file}"
                else
                    for_update="${for_update} ${name_on_server};${addon_path}"
                fi
                break
            fi
        done
        if [[ ${found} -eq 0 ]]; then
            log DB2 "No definition file found for replication controller ${name_on_server}. Scheduling for deletion"
            for_delete="${for_delete} ${name_on_server}"
        fi
    done

    log DB3 "matched_files=${matched_files}"

    for addon_path in ${addon_paths_in_files}; do
        echo ${matched_files} | grep "${addon_path}" >/dev/null
        if [[ $? -ne 0 ]]; then
            new_files="${new_files} ${addon_path}"
        fi
    done
}

function reconcile-objects() {
    local -r addon_path=$1
    local -r obj_type=$2
    local -r separator=$3    # name separator
    match-objects ${addon_path} ${obj_type} ${separator}

    log DBG "${obj_type}: for_delete=${for_delete}"
    log DBG "${obj_type}: for_update=${for_update}"
    log DBG "${obj_type}: for_ignore=${for_ignore}"
    log DBG "${obj_type}: new_files=${new_files}"

    stop-objects "${obj_type}" "${for_delete}"
    # wait-for-jobs below is a protection against changing the basename
    # of a replication controller without changing the selector.
    # If we don't wait, the new rc may be created before the old one is deleted
    # In such case the old one will wait for all its pods to be gone, but the pods
    # are created by the new replication controller.
    # passing --cascade=false could solve the problem, but we want
    # all orphan pods to be deleted.
    wait-for-jobs
    stopResult=$?

    create-objects "${obj_type}" "${new_files}"
    update-objects "${obj_type}" "${for_update}"

    local obj
    for obj in ${for_ignore}; do
        log DB2 "The ${obj_type} ${obj} is already up to date"
    done

    wait-for-jobs
    createUpdateResult=$?

    if [[ ${stopResult} -eq 0 ]] && [[ ${createUpdateResult} -eq 0 ]]; then
        return 0
    else
        return 1
    fi
}

function update-addons() {
    local -r addon_path=$1
    # be careful, reconcile-objects uses global variables
    reconcile-objects ${addon_path} ReplicationController "-" &

    # We don't expect service names to be versioned, so
    # we match entire name, ignoring version suffix.
    # That's why we pass an empty string as the version separator.
    # If the service description differs on disk, the service should be recreated.
    # This is not implemented in this version.
    reconcile-objects ${addon_path} Service "" &

    wait-for-jobs
    if [[ $? -eq 0 ]]; then
        log INFO "== Kubernetes addon update completed successfully at $(date -Is) =="
    else
        log WRN "== Kubernetes addon update completed with errors at $(date -Is) =="
    fi
}

if [[ $# -ne 1 ]]; then
    echo "Illegal number of parameters" 1>&2
    exit 1
fi

addon_path=$1
update-addons ${addon_path}
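To make the versioned-name matching concrete: ReplicationControllers are reconciled with '-' as the separator, so a server-side controller whose name ends in a version suffix matches an on-disk file with the same basename. A rough trace with hypothetical names (not objects from this PR):

    # on server: kube-dns-v1      on disk: kube-dns-v2
    get-basename kube-dns-v1 -    # -> kube-dns  (suffix -v1 stripped)
    get-suffix   kube-dns-v2 -    # -> -v2
    # basename + file suffix: kube-dns + -v2 == kube-dns-v2  =>  match
    # suffixes differ (-v1 vs -v2)  =>  for_update="kube-dns-v1;<file>"
    # => the old controller is stopped and the new one created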
contrib/ansible/roles/kubernetes-addons/files/kube-addons.sh (new file, 178 lines)

#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The business logic for whether a given object should be created
# was already enforced by salt, and /etc/kubernetes/addons is the
# managed result of that. Start everything below that directory.
KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}

ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600}

token_dir=${TOKEN_DIR:-/srv/kubernetes}

function create-kubeconfig-secret() {
  local -r token=$1
  local -r username=$2
  local -r server=$3
  local -r safe_username=$(tr -s ':_' '--' <<< "${username}")

  # Make a kubeconfig file with the token.
  if [[ ! -z "${CA_CERT:-}" ]]; then
    # If the CA cert is available, put it into the secret rather than using
    # insecure-skip-tls-verify.
    read -r -d '' kubeconfig <<EOF
apiVersion: v1
kind: Config
users:
- name: ${username}
  user:
    token: ${token}
clusters:
- name: local
  cluster:
    server: ${server}
    certificate-authority-data: ${CA_CERT}
contexts:
- context:
    cluster: local
    user: ${username}
  name: service-account-context
current-context: service-account-context
EOF
  else
    read -r -d '' kubeconfig <<EOF
apiVersion: v1
kind: Config
users:
- name: ${username}
  user:
    token: ${token}
clusters:
- name: local
  cluster:
    server: ${server}
    insecure-skip-tls-verify: true
contexts:
- context:
    cluster: local
    user: ${username}
  name: service-account-context
current-context: service-account-context
EOF
  fi

  local -r kubeconfig_base64=$(echo "${kubeconfig}" | base64 -w0)
  read -r -d '' secretyaml <<EOF
apiVersion: v1beta3
data:
  kubeconfig: ${kubeconfig_base64}
kind: Secret
metadata:
  name: token-${safe_username}
type: Opaque
EOF
  create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" &
  # TODO: label the secrets with special label so kubectl does not show these?
}

# $1 filename of addon to start.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries
function start_addon() {
  local -r addon_filename=$1;
  local -r tries=$2;
  local -r delay=$3;

  create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}"
}

# $1 string with json or yaml.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries
# $4 name of this object to use when logging about it.
function create-resource-from-string() {
  local -r config_string=$1;
  local tries=$2;
  local -r delay=$3;
  local -r config_name=$4;
  while [ ${tries} -gt 0 ]; do
    echo "${config_string}" | ${KUBECTL} create -f - && \
      echo "== Successfully started ${config_name} at $(date -Is)" && \
      return 0;
    let tries=tries-1;
    echo "== Failed to start ${config_name} at $(date -Is). ${tries} tries remaining. =="
    sleep ${delay};
  done
  return 1;
}

# The business logic for whether a given object should be created
# was already enforced by salt, and /etc/kubernetes/addons is the
# managed result of that. Start everything below that directory.
echo "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC} =="

# Load the kube-env, which has all the environment variables we care
# about, in a flat yaml format.
kube_env_yaml="/var/cache/kubernetes-install/kube_env.yaml"
if [ ! -e "${kubelet_kubeconfig_file}" ]; then
  eval $(python -c '''
import pipes,sys,yaml

for k,v in yaml.load(sys.stdin).iteritems():
    print "readonly {var}={value}".format(var = k, value = pipes.quote(str(v)))
''' < "${kube_env_yaml}")
fi

# Generate secrets for "internal service accounts".
# TODO(etune): move to a completely yaml/object based
# workflow so that service accounts can be created
# at the same time as the services that use them.
# NOTE: needs to run as root to read this file.
# Read each line in the csv file of tokens.
# Expect errors when the script is started again.
while read line; do
  # Split each line into the token and username.
  IFS=',' read -a parts <<< "${line}"
  token=${parts[0]}
  username=${parts[1]}
  # DNS is special, since it's necessary for cluster bootstrapping.
  if [[ "${username}" == "system:dns" ]] && [[ ! -z "${KUBERNETES_MASTER_NAME:-}" ]]; then
    create-kubeconfig-secret "${token}" "${username}" "https://${KUBERNETES_MASTER_NAME}"
  else
    # Set the server to https://kubernetes. Pods/components that
    # do not have DNS available will have to override the server.
    create-kubeconfig-secret "${token}" "${username}" "https://kubernetes"
  fi
done < ${token_dir}/known_tokens.csv

# Create admission_control objects if defined before any other addon services. If the limits
# are defined in a namespace other than default, we should still create the limits for the
# default namespace.
for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do
  start_addon ${obj} 100 10 &
  echo "++ obj ${obj} is created ++"
done

# Check if the configuration has changed recently - in case the user
# created/updated/deleted the files on the master.
while true; do
  #kube-addon-update.sh must be deployed in the same directory as this file
  `dirname $0`/kube-addon-update.sh /etc/kubernetes/addons
  sleep $ADDON_CHECK_INTERVAL_SEC
done
kube-gen-token.sh (new file, 30 lines; referenced by the addons role tasks below)

#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

token_dir=${TOKEN_DIR:-/var/srv/kubernetes}
token_file="${token_dir}/known_tokens.csv"

create_accounts=($@)

touch "${token_file}"
for account in "${create_accounts[@]}"; do
  if grep "${account}" "${token_file}" ; then
    continue
  fi
  token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  echo "${token},${account},${account}" >> "${token_file}"
  echo "Added ${account}"
done
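For reference, running the generator for the system:dns account (as the addons role does below) appends one comma-separated line per account; the token value here is of course illustrative:

    $ TOKEN_DIR=/etc/kubernetes ./kube-gen-token.sh system:dns
    Added system:dns
    $ cat /etc/kubernetes/known_tokens.csv
    VZuCUjRZdLhhUDbVkeyEqNs71VNHnbpW,system:dns,system:dns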
kubernetes-addons role handlers (new file, 8 lines)

---
- name: reload and restart kube-addons
  command: systemctl daemon-reload
  notify:
    - restart kube-addons

- name: restart kube-addons
  service: name=kube-addons state=restarted
contrib/ansible/roles/kubernetes-addons/meta/main.yml (new file, 3 lines)

---
dependencies:
  - { role: master }
contrib/ansible/roles/kubernetes-addons/tasks/dns.yml (new file, 55 lines)

---
- name: DNS | Assures {{ kube_config_dir }}/addons/dns dir exists
  file: path={{ kube_config_dir }}/addons/dns state=directory

- name: DNS | Assures local dns addon dir exists
  local_action: file
    path={{ local_temp_addon_dir }}/dns
    state=directory
  sudo: no

- name: DNS | Download skydns-rc.yaml file from Kubernetes repo
  local_action: get_url
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/dns/skydns-rc.yaml.in
    dest="{{ local_temp_addon_dir }}/dns/skydns-rc.yaml.j2"
    force=yes
  sudo: no

- name: DNS | Convert pillar vars to ansible vars for skydns-rc.yaml
  local_action: replace
    dest="{{ local_temp_addon_dir }}/dns/skydns-rc.yaml.j2"
    regexp="pillar\[\'(\w*)\'\]"
    replace="\1"
  sudo: no

- name: DNS | Install Template from converted saltfile
  template:
    args:
      src: "{{ local_temp_addon_dir }}/dns/skydns-rc.yaml.j2"
      dest: "{{ kube_config_dir }}/addons/dns/skydns-rc.yaml"
      mode: 0755
      owner: root
      group: root

- name: DNS | Download skydns-svc.yaml file from Kubernetes repo
  local_action: get_url
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/dns/skydns-svc.yaml.in
    dest="{{ local_temp_addon_dir }}/dns/skydns-svc.yaml.j2"
    force=yes
  sudo: no

- name: DNS | Convert pillar vars to ansible vars for skydns-svc.yaml
  local_action: replace
    dest="{{ local_temp_addon_dir }}/dns/skydns-svc.yaml.j2"
    regexp="pillar\[\'(\w*)\'\]"
    replace="\1"
  sudo: no

- name: DNS | Install Template from converted saltfile
  template:
    args:
      src: "{{ local_temp_addon_dir }}/dns/skydns-svc.yaml.j2"
      dest: "{{ kube_config_dir }}/addons/dns/skydns-svc.yaml"
      mode: 0755
      owner: root
      group: root
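The two replace tasks above rewrite salt pillar lookups into plain Jinja2 variables so Ansible can render the upstream templates. For example, a line such as this in skydns-rc.yaml.in (illustrative):

    - -domain={{ pillar['dns_domain'] }}

becomes

    - -domain={{ dns_domain }}

which then picks up dns_domain from group_vars/all.yml when the template task runs.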
kubernetes-addons role generic-install.yml (new file, 5 lines)

- name: Install PyYAML
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: PyYAML
    state: latest
contrib/ansible/roles/kubernetes-addons/tasks/main.yml (new file, 57 lines)

---
- name: Assures /etc/kubernetes/addons/ dir exists
  file: path=/etc/kubernetes/addons/ state=directory

- include: generic-install.yml
  when: not is_atomic

- name: Assures local addon dir exists
  local_action: file
    path={{ local_temp_addon_dir }}
    state=directory
  sudo: no

- include: dns.yml
  when: dns_setup
  tags: dns

#- name: Get kube-addons script from Kubernetes
#  get_url:
#    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/kube-addons.sh
#    dest={{ kube_script_dir }}/kube-addons.sh mode=0755
#    force=yes

#- name: Get kube-addon-update script from Kubernetes
#  get_url:
#    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/kube-addon-update.sh
#    dest={{ kube_script_dir }}/kube-addon-update.sh mode=0755
#    force=yes

- name: HACK | copy local kube-addons.sh
  copy: src=kube-addons.sh dest={{ kube_script_dir }}/kube-addons.sh mode=0755

- name: HACK | copy local kube-addon-update.sh
  copy: src=kube-addon-update.sh dest={{ kube_script_dir }}/kube-addon-update.sh mode=0755

- name: Copy script to create known_tokens.csv
  copy: src=kube-gen-token.sh dest={{ kube_script_dir }}/kube-gen-token.sh mode=0755

- name: Run kube-gen-token script to create {{ kube_config_dir }}/known_tokens.csv
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}"
  environment:
    TOKEN_DIR: "{{ kube_config_dir }}"
  with_items:
    - "system:dns"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  notify:
    - restart apiserver
    - restart kube-addons

- name: Install kube-addons service
  template: src=kube-addons.service.j2 dest=/etc/systemd/system/kube-addons.service
  notify:
    - reload and restart kube-addons

- name: Enable and start kube addons
  service: name=kube-addons.service enabled=yes state=started
kube-addons.service.j2 (new file, 12 lines; the systemd unit template installed above)

[Unit]
Description=Kubernetes Addon Object Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
Environment="TOKEN_DIR={{ kube_config_dir }}"
Environment="KUBECTL_BIN=/usr/bin/kubectl"
Environment="KUBERNETES_MASTER_NAME={{ groups['masters'][0] }}"
ExecStart={{ kube_script_dir }}/kube-addons.sh

[Install]
WantedBy=multi-user.target
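Once the unit is installed and started by the tasks above, it behaves like any other systemd service, so the addon loop can be checked with standard commands (not part of the PR):

    $ systemctl status kube-addons.service
    $ journalctl -u kube-addons.service -f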
contrib/ansible/roles/kubernetes/defaults/main.yml (new file, 20 lines)

# This directory is where all the additional scripts go
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location
kube_script_dir: /usr/libexec/kubernetes

# This directory is where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location.
# Editing this value will almost surely break something. Don't
# change it. Things like the systemd scripts are hard coded to
# look in here. Don't do it.
kube_config_dir: /etc/kubernetes

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/certs"

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
contrib/ansible/roles/kubernetes/files/make-ca-cert.sh (new executable file)
@@ -0,0 +1,80 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

cert_ip=$1
cert_dir=${CERT_DIR:-/srv/kubernetes}
cert_group=${CERT_GROUP:-kube-cert}

mkdir -p "$cert_dir"

use_cn=false

# TODO: Add support for discovery on other providers?
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
  cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi

if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
  cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
fi

if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
  cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
  use_cn=true
fi

tmpdir=$(mktemp -d -t kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"

# TODO: For now, this is a patched tool that makes subject-alt-name work, when
# the fix is upstream move back to the upstream easyrsa. This is cached in GCS
# but is originally taken from:
# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
#
# To update, do the following:
# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
#
# Due to GCS caching of public objects, it may take time for this to be widely
# distributed.
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
tar xzf easy-rsa.tar.gz > /dev/null 2>&1

cd easy-rsa-master/easyrsa3
./easyrsa init-pki > /dev/null 2>&1
./easyrsa --batch "--req-cn=$cert_ip@`date +%s`" build-ca nopass > /dev/null 2>&1
if [ $use_cn = "true" ]; then
    ./easyrsa build-server-full $cert_ip nopass > /dev/null 2>&1
    cp -p pki/issued/$cert_ip.crt "${cert_dir}/server.cert" > /dev/null 2>&1
    cp -p pki/private/$cert_ip.key "${cert_dir}/server.key" > /dev/null 2>&1
else
    ./easyrsa --subject-alt-name=IP:$cert_ip build-server-full kubernetes-master nopass > /dev/null 2>&1
    cp -p pki/issued/kubernetes-master.crt "${cert_dir}/server.cert" > /dev/null 2>&1
    cp -p pki/private/kubernetes-master.key "${cert_dir}/server.key" > /dev/null 2>&1
fi
./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1
cp -p pki/ca.crt "${cert_dir}/ca.crt"
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"

# Make server certs accessible to apiserver.
chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"
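For reference, the script can also be exercised by hand with the same environment the gen_certs.yml task below supplies; a hypothetical run for a master reachable at 10.0.0.10, followed by a quick inspection of the result:

    $ CERT_DIR=/etc/kubernetes/certs CERT_GROUP=kube-cert ./make-ca-cert.sh 10.0.0.10
    $ openssl x509 -in /etc/kubernetes/certs/server.cert -noout -subject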
contrib/ansible/roles/kubernetes/files/make-cert.sh (new executable file)
@@ -0,0 +1,26 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

cert_dir=${CERT_DIR:-/srv/kubernetes}
cert_group=${CERT_GROUP:-kube-cert}

mkdir -p "$cert_dir"

openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
  -subj "/CN=kubernetes.invalid/O=Kubernetes" \
  -keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert"
chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert"
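This is the simpler self-signed fallback: no CA, no subject-alt-name handling, just one openssl invocation. A hypothetical manual run and check (older openssl prints the subject in the slash form used by -subj):

    $ CERT_DIR=/tmp/certs ./make-cert.sh
    $ openssl x509 -in /tmp/certs/server.cert -noout -subject
    subject= /CN=kubernetes.invalid/O=Kubernetes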
contrib/ansible/roles/kubernetes/tasks/certs.yml (new file)
@@ -0,0 +1,52 @@
---
- name: Create system kube-cert groups
  group: name={{ kube_cert_group }} state=present system=yes

- name: Create system kube user
  user:
    name=kube
    comment="Kubernetes user"
    shell=/sbin/nologin
    state=present
    system=yes
    groups={{ kube_cert_group }}

- name: make sure the certificate directory exists
  file:
    path={{ kube_cert_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}

- name: Install rsync to push certs around
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: rsync
    state: latest
  when: not is_atomic

- name: Generating RSA key for cert node to push to others
  user: name=root generate_ssh_key=yes
  run_once: true
  delegate_to: "{{ groups['masters'][0] }}"

- name: Downloading pub key
  fetch:
    src=/root/.ssh/id_rsa.pub
    dest=/tmp/id_rsa.pub
    flat=yes
    fail_on_missing=true
  run_once: true
  delegate_to: "{{ groups['masters'][0] }}"
  changed_when: false

- include: gen_certs.yml
  when: inventory_hostname == groups['masters'][0]

- include: place_certs.yml

- name: Delete the downloaded pub key
  local_action: file path=/tmp/id_rsa.pub state=absent
  sudo: false
  run_once: true
  changed_when: false
@@ -1,4 +1,4 @@
 ---
-- name: Remove docker window manager on F20
+- name: Fedora | Remove docker window manager on F20
   yum: pkg=docker state=absent
   when: not is_atomic and ansible_distribution_major_version == "20"
contrib/ansible/roles/kubernetes/tasks/gen_certs.yml (new file)
@@ -0,0 +1,49 @@
---
#- name: Get create cert script from Kubernetes
#  get_url:
#    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-cert.sh
#    dest={{ kube_script_dir }}/make-cert.sh mode=0500
#    force=yes

#- name: Get create ca cert script from Kubernetes
#  get_url:
#    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
#    dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500
#    force=yes

- name: HACK | overwrite make-cert.sh from local copy
  copy:
    src=make-cert.sh
    dest={{ kube_script_dir }}
    mode=0500
  changed_when: false

- name: HACK | overwrite make-ca-cert.sh from local copy
  copy:
    src=make-ca-cert.sh
    dest={{ kube_script_dir }}
    mode=0500
  changed_when: false

# FIXME This only generates a cert for one master...
- name: Run create cert script on master
  command:
    "{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}"
  args:
    creates: "{{ kube_cert_dir }}/server.cert"
  environment:
    CERT_DIR: "{{ kube_cert_dir }}"
    CERT_GROUP: "{{ kube_cert_group }}"

- name: Verify certificate permissions
  file:
    path={{ item }}
    group={{ kube_cert_group }}
    owner=kube
    mode=0440
  with_items:
    - "{{ kube_cert_dir }}/ca.crt"
    - "{{ kube_cert_dir }}/server.cert"
    - "{{ kube_cert_dir }}/server.key"
    - "{{ kube_cert_dir }}/kubecfg.crt"
    - "{{ kube_cert_dir }}/kubecfg.key"
@@ -2,13 +2,22 @@
 - include: fedora.yml
   when: ansible_distribution == "Fedora"

-- name: Install kubernetes
-  yum: pkg=kubernetes state=latest
-  notify:
-  - restart daemons
-  when: not is_atomic
+- name: Update {{ kube_script_dir }} if this is atomic
+  set_fact:
+    kube_script_dir: "/usr/local/libexec/kubernetes"
+  when: is_atomic and kube_script_dir == "/usr/libexec/kubernetes"
+
+- name: Create kubernetes config directory
+  file: path={{ kube_config_dir }} state=directory
+
+- name: Create kubernetes script directory
+  file: path={{ kube_script_dir }} state=directory

 - name: write the global config file
-  template: src=config.j2 dest=/etc/kubernetes/config
+  template: src=config.j2 dest={{ kube_config_dir }}/config
   notify:
   - restart daemons
+
+- include: certs.yml
+  tags:
+    certs
contrib/ansible/roles/kubernetes/tasks/place_certs.yml (new file)
@@ -0,0 +1,26 @@
---
- name: place ssh public key on other nodes so apiserver can push certs
  authorized_key: user=root key="{{ item }}" state=present
  with_file:
    - '/tmp/id_rsa.pub'
  changed_when: false

- name: Copy certificates directly from the apiserver to nodes
  synchronize:
    src={{ kube_cert_dir }}/{{ item }}
    dest={{ kube_cert_dir }}/{{ item }}
    rsync_timeout=30
    set_remote_user=no
  delegate_to: "{{ groups['masters'][0] }}"
  with_items:
    - "ca.crt"
    - "kubecfg.crt"
    - "kubecfg.key"
  notify:
    - restart daemons

- name: remove ssh public key so apiserver can not push stuff
  authorized_key: user=root key="{{ item }}" state=absent
  with_file:
    - '/tmp/id_rsa.pub'
  changed_when: false
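Together with certs.yml above, the distribution flow is: temporarily authorize the master's root SSH key on every node, rsync the client certs master-to-node (synchronize with delegate_to runs the transfer from the master), then revoke the key again. The middle step is roughly equivalent to running, by hand (hypothetical node name):

    $ ssh root@master 'rsync -av /etc/kubernetes/certs/{ca.crt,kubecfg.crt,kubecfg.key} root@node-1:/etc/kubernetes/certs/'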
@@ -20,4 +20,4 @@ KUBE_LOG_LEVEL="--v=0"
 KUBE_ALLOW_PRIV="--allow_privileged=true"

 # How the replication controller, scheduler, and proxy
-KUBE_MASTER="--master=http://{{ groups['masters'][0] }}:8080"
+KUBE_MASTER="--master=https://{{ groups['masters'][0] }}:443"
@@ -2,9 +2,9 @@
 - name: restart daemons
   command: /bin/true
   notify:
   - restart apiserver
   - restart controller-manager
   - restart scheduler

 - name: restart apiserver
   service: name=kube-apiserver state=restarted
contrib/ansible/roles/master/meta/main.yml (new file)
@@ -0,0 +1,4 @@
---
dependencies:
  - { role: common }
  - { role: kubernetes }
contrib/ansible/roles/master/tasks/centos.yml (new file)
@@ -0,0 +1,8 @@
---
- name: CentOS | Install kubernetes CentOS style
  yum:
    pkg=kubernetes-master
    state=latest
    enablerepo=virt7-testing
  notify:
    - restart daemons
@@ -1,10 +1,10 @@
 ---
 - name: Open firewalld port for apiserver
-  firewalld: port=8080/tcp permanent=false state=enabled
-  # in case this is also a minion with firewalld turned off
+  firewalld: port=443/tcp permanent=false state=enabled
+  # in case this is also a node with firewalld turned off
   ignore_errors: yes

 - name: Save firewalld port for apiserver
-  firewalld: port=8080/tcp permanent=true state=enabled
-  # in case this is also a minion with firewalld turned off
+  firewalld: port=443/tcp permanent=true state=enabled
+  # in case this is also a node with firewalld turned off
   ignore_errors: yes
contrib/ansible/roles/master/tasks/generic-install.yml (new file)
@@ -0,0 +1,7 @@
- name: Install kubernetes
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: kubernetes-master
    state: latest
  notify:
    - restart daemons
@@ -1,14 +1,14 @@
 ---
 - name: Get iptables rules
-  shell: iptables -L
+  command: iptables -L
   register: iptablesrules
   always_run: yes

 - name: Open apiserver port with iptables
-  command: /sbin/iptables -I INPUT 1 -p tcp --dport 8080 -j ACCEPT -m comment --comment "kube-apiserver"
+  command: /sbin/iptables -I INPUT 1 -p tcp --dport 443 -j ACCEPT -m comment --comment "kube-apiserver"
   when: "'kube-apiserver' not in iptablesrules.stdout"
   notify:
   - restart iptables

 - name: Save iptables rules
   command: service iptables save
@@ -1,18 +1,42 @@
 ---
+- include: generic-install.yml
+  when: not is_atomic and not ansible_distribution == "CentOS"
+
+- include: centos.yml
+  when: not is_atomic and ansible_distribution == "CentOS"
+
 - name: write the config file for the api server
-  template: src=apiserver.j2 dest=/etc/kubernetes/apiserver
+  template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver
   notify:
   - restart apiserver

+- name: Ensure that a token auth file exists (addons may populate it)
+  file: path={{ kube_config_dir }}/known_tokens.csv state=touch
+  changed_when: false
+
 - name: write the config file for the controller-manager
-  copy: src=controller-manager dest=/etc/kubernetes/controller-manager
+  template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager
   notify:
   - restart controller-manager

 - name: write the config file for the scheduler
-  copy: src=scheduler dest=/etc/kubernetes/scheduler
+  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
   notify:
   - restart scheduler

+- name: add cap_net_bind_service to kube-apiserver
+  capabilities: path=/usr/bin/kube-apiserver capability=cap_net_bind_service=ep state=present
+  when: not is_atomic
+
+- name: write the kubecfg (auth) file for controller-manager
+  template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig
+  notify:
+  - restart controller-manager
+
+- name: write the kubecfg (auth) file for scheduler
+  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
+  notify:
+  - restart scheduler
+
 - name: Enable apiserver
   service: name=kube-apiserver enabled=yes state=started
@@ -23,29 +47,6 @@
 - name: Enable scheduler
   service: name=kube-scheduler enabled=yes state=started

-- name: Copy minion definition json files to master
-  template: src=node.j2 dest=/tmp/node-{{ item }}.json
-  changed_when: false
-  with_items:
-    groups['minions']
-  when: inventory_hostname == groups['masters'][0]
-
-- name: Load minion definition into master
-  command: /usr/bin/kubectl create -f /tmp/node-{{ item }}.json
-  register: command_result
-  failed_when: command_result.rc != 0 and 'already exists' not in command_result.stderr
-  changed_when: "command_result.rc == 0"
-  with_items:
-    groups['minions']
-  when: inventory_hostname == groups['masters'][0]
-
-- name: Delete minion definitions from master
-  file: path=/tmp/node-{{ item }}.json state=absent
-  changed_when: false
-  with_items:
-    groups['minions']
-  when: inventory_hostname == groups['masters'][0]
-
 - include: firewalld.yml
   when: has_firewalld
@@ -5,22 +5,22 @@
 #

 # The address on the local server to listen to.
-KUBE_API_ADDRESS="--address=0.0.0.0"
+KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"

 # The port on the local server to listen on.
-# KUBE_API_PORT="--port=8080"
+KUBE_API_PORT="--secure-port=443"

-# Port minions listen on
+# Port nodes listen on
 # KUBELET_PORT="--kubelet_port=10250"

 # Address range to use for services
 KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"

 # Location of the etcd cluster
-KUBE_ETCD_SERVERS="--etcd_servers=http://{{ groups['etcd'][0] }}:2379"
+KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"

 # default admission control policies
-KUBE_ADMISSION_CONTROL="--admission_control=NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
+KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"

 # Add your own!
-KUBE_API_ARGS=""
+KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.cert --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_config_dir }}/known_tokens.csv"
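For illustration, with three hosts in the [etcd] inventory group the Jinja loop above renders to a comma-separated endpoint list (hypothetical host names):

    KUBE_ETCD_SERVERS="--etcd_servers=http://etcd-1:2379,http://etcd-2:2379,http://etcd-3:2379"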
@@ -4,4 +4,4 @@
 # defaults from config and apiserver should be adequate

 # Add your own!
-KUBE_CONTROLLER_MANAGER_ARGS=""
+KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig"
controller-manager.kubeconfig.j2 (new file)
@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: https://{{ groups['masters'][0] }}:443
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: kubelet
  name: kubelet-to-{{ cluster_name }}
current-context: kubelet-to-{{ cluster_name }}
kind: Config
preferences: {}
users:
- name: kubelet
  user:
    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
    client-key: {{ kube_cert_dir }}/kubecfg.key
node.j2 (deleted file)
@@ -1,16 +0,0 @@
-{
-  "apiVersion": "v1beta3",
-  "kind": "Node",
-  "metadata": {
-    "name": "{{ item }}"
-  },
-  "spec": {
-    "externalID": "{{ item }}"
-  },
-  "status": {
-    "capacity": {
-      "cpu": "1",
-      "memory": "1"
-    }
-  }
-}
@@ -4,4 +4,4 @@
 # default config should be adequate

 # Add your own!
-KUBE_SCHEDULER_ARGS=""
+KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"
scheduler.kubeconfig.j2 (new file)
@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: https://{{ groups['masters'][0] }}:443
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: kubelet
  name: kubelet-to-{{ cluster_name }}
current-context: kubelet-to-{{ cluster_name }}
kind: Config
preferences: {}
users:
- name: kubelet
  user:
    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
    client-key: {{ kube_cert_dir }}/kubecfg.key
@@ -1,22 +0,0 @@
----
-- name: write the config files for kubelet
-  template: src=kubelet.j2 dest=/etc/kubernetes/kubelet
-  notify:
-  - restart kubelet
-
-- name: write the config files for proxy
-  copy: src=proxy dest=/etc/kubernetes/proxy
-  notify:
-  - restart proxy
-
-- name: Enable kubelet
-  service: name=kubelet enabled=yes state=started
-
-- name: Enable proxy
-  service: name=kube-proxy enabled=yes state=started
-
-- include: firewalld.yml
-  when: has_firewalld
-
-- include: iptables.yml
-  when: not has_firewalld and has_iptables
@@ -2,8 +2,8 @@
 - name: restart daemons
   command: /bin/true
   notify:
   - restart kubelet
   - restart proxy

 - name: restart kubelet
   service: name=kubelet state=restarted
contrib/ansible/roles/node/meta/main.yml (new file)
@@ -0,0 +1,5 @@
---
dependencies:
  - { role: common }
  - { role: docker }
  - { role: kubernetes }
contrib/ansible/roles/node/tasks/centos.yml (new file)
@@ -0,0 +1,5 @@
---
- name: CentOS | Install kubernetes CentOS style
  yum: pkg=kubernetes-node state=latest enablerepo=virt7-testing
  notify:
    - restart daemons
contrib/ansible/roles/node/tasks/generic-install.yml (new file)
@@ -0,0 +1,7 @@
- name: Install kubernetes
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: kubernetes-node
    state: latest
  notify:
    - restart daemons
@@ -1,6 +1,6 @@
 ---
 - name: Get iptables rules
-  shell: iptables -L
+  command: iptables -L
   register: iptablesrules
   always_run: yes
@@ -11,7 +11,7 @@
   command: /sbin/iptables -I INPUT 1 -p tcp --dport 10250 -j ACCEPT -m comment --comment "kubelet"
   when: "'kubelet' not in iptablesrules.stdout"
   notify:
   - restart iptables

 - name: Save iptables rules
   command: service iptables save
contrib/ansible/roles/node/tasks/main.yml (new file)
@@ -0,0 +1,47 @@
---
- name: Check if selinux enforcing
  command: getenforce
  register: selinux
  changed_when: false

- name: Set selinux permissive because tokens and selinux don't work together
  selinux: state=permissive policy=targeted
  when: "'Enforcing' in selinux.stdout"

- include: generic-install.yml
  when: not is_atomic and not ansible_distribution == "CentOS"

- include: centos.yml
  when: not is_atomic and ansible_distribution == "CentOS"

- name: write the config files for kubelet
  template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet
  notify:
    - restart kubelet

- name: write the config files for proxy
  template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
  notify:
    - restart proxy

- name: write the kubecfg (auth) file for kubelet
  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
  notify:
    - restart kubelet

- name: write the kubecfg (auth) file for kube-proxy
  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
  notify:
    - restart proxy

- name: Enable kubelet
  service: name=kubelet enabled=yes state=started

- name: Enable proxy
  service: name=kube-proxy enabled=yes state=started

- include: firewalld.yml
  when: has_firewalld

- include: iptables.yml
  when: not has_firewalld and has_iptables
@@ -1,5 +1,5 @@
 ###
-# kubernetes kubelet (minion) config
+# kubernetes kubelet (node) config

 # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
 KUBELET_ADDRESS="--address=0.0.0.0"
@@ -11,7 +11,11 @@ KUBELET_ADDRESS="--address=0.0.0.0"
 KUBELET_HOSTNAME="--hostname_override={{ inventory_hostname }}"

 # location of the api-server
-KUBELET_API_SERVER="--api_servers=http://{{ groups['masters'][0] }}:8080"
+KUBELET_API_SERVER="--api_servers=https://{{ groups['masters'][0] }}:443"

 # Add your own!
-KUBELET_ARGS=""
+{% if dns_setup %}
+KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir }}/kubelet.kubeconfig"
+{% else %}
+KUBELET_ARGS="--kubeconfig={{ kube_config_dir }}/kubelet.kubeconfig"
+{% endif %}
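With dns_setup enabled, the conditional above collapses to a single rendered line; for example, with the hypothetical values dns_server=10.254.0.10 and dns_domain=kubernetes.local:

    KUBELET_ARGS="--cluster_dns=10.254.0.10 --cluster_domain=kubernetes.local --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"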
contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 (new file)
@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: https://{{ groups['masters'][0] }}:443
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: kubelet
  name: kubelet-to-{{ cluster_name }}
current-context: kubelet-to-{{ cluster_name }}
kind: Config
preferences: {}
users:
- name: kubelet
  user:
    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
    client-key: {{ kube_cert_dir }}/kubecfg.key
@@ -4,4 +4,4 @@
 # default config should be adequate

 # Add your own!
-KUBE_PROXY_ARGS=""
+KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 (new file)
@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: https://{{ groups['masters'][0] }}:443
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: kubelet
  name: kubelet-to-{{ cluster_name }}
current-context: kubelet-to-{{ cluster_name }}
kind: Config
preferences: {}
users:
- name: kubelet
  user:
    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
    client-key: {{ kube_cert_dir }}/kubecfg.key
contrib/ansible/roles/pre-ansible/tasks/fedora-dnf.yml (new file)
@@ -0,0 +1,8 @@
---
- name: Install minimal packages
  raw: dnf install -y {{ item }}
  with_items:
    - python  # everyone needs python2
    - python-dnf  # some versions of ansible (2.0) use dnf directly
    - yum  # some versions of ansible use yum
    - libselinux-python
contrib/ansible/roles/pre-ansible/tasks/main.yml (new file)
@@ -0,0 +1,11 @@
---
- name: Get os_version from /etc/os-release
  raw: "grep '^VERSION_ID=' /etc/os-release | sed s'/VERSION_ID=//'"
  register: os_version

- name: Get distro name from /etc/os-release
  raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
  register: distro

- include: fedora-dnf.yml
  when: os_version.stdout|int >= 22 and 'Fedora' in distro.stdout
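These tasks use the raw module because the target may not have Python installed yet, so no regular Ansible module can run. On the host, each task is just the plain shell pipeline; on a Fedora 22 box, for example:

    $ grep '^VERSION_ID=' /etc/os-release | sed s'/VERSION_ID=//'
    22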
contrib/ansible/setup.sh (new executable file)
@@ -0,0 +1,19 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

inventory=${INVENTORY:-inventory}

ansible-playbook -i ${inventory} cluster.yml "$@"
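Because the script forwards its arguments straight to ansible-playbook, any additional playbook flag can be appended; for example, limiting a run to a single host with verbose output (hypothetical host name):

    $ ./setup.sh --limit=node-1 -vvv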
contrib/ansible/vagrant/Vagrantfile (new file, vendored)
@@ -0,0 +1,46 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure(2) do |config|
  config.vm.box = "chef/centos-7.0"

  # config.vm.network "public_network"

  config.vm.define "master", primary: true do |master|
    master.vm.hostname = "master.vms.local"
    master.vm.network "private_network", ip: "192.168.1.100"
  end

  (1..1).each do |i|
    config.vm.define "node-#{i}" do |node|
      node.vm.hostname = "node-#{i}.vms.local"
      node.vm.network "private_network", ip: "192.168.1.1#{i}"
      node.vm.provision :ansible do |ansible|
        ansible.host_key_checking = false
        ansible.extra_vars = {
          ansible_ssh_user: 'vagrant',
          ansible_ssh_pass: 'vagrant',
          user: 'vagrant'
        }
        #ansible.verbose = 'vvv'
        ansible.playbook = "../cluster.yml"
        ansible.inventory_path = "vinventory"

        ansible.limit = 'all'
      end
    end
  end

  config.vm.provider "virtualbox" do |vb|
    # Display the VirtualBox GUI when booting the machine
    vb.gui = false
    # Customize the amount of memory on the VM:
    vb.memory = "2048"
    # vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
  end
end
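From the contrib/ansible/vagrant directory, bringing the test cluster up is then just:

    $ vagrant up

The Ansible provisioner is attached to the node VMs with ansible.limit = 'all', so a single provisioning pass runs the whole cluster.yml playbook across the master and the node.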
contrib/ansible/vagrant/vinventory (new file)
@@ -0,0 +1,8 @@
[masters]
192.168.1.100

[etcd]
192.168.1.100

[nodes]
192.168.1.11