kubernetes/contrib/ansible/vagrant/Vagrantfile
Eric Paris a25b34e1a4 Ansible: Vagrant: allow passing ansible tags to vagrant provision
Creating a cluster from scratch takes about 7 minutes. But if you have just
rebuilt the binaries and only want to update those, you don't want to rerun
the entire provision. The ansible tag 'binary-update' will do exactly that.
Now one can do
```
ANSIBLE_TAGS=binary-update vagrant provision
```
And it will push the new binaries.
2015-07-24 09:58:31 -04:00

# -*- mode: ruby -*-
# vi: set ft=ruby :
require "yaml"
### This is a new provider, different from cloudbau's.
### First run: "vagrant plugin uninstall vagrant-openstack-plugin"
### Then run: "vagrant plugin install vagrant-openstack-provider"
require 'vagrant-openstack-provider'
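# Environment tunables (a usage sketch; both variables are optional):
#   NUM_NODES=4 ANSIBLE_TAGS=binary-update vagrant provision
# NUM_NODES    - how many kube-node VMs to create (default: 2)
# ANSIBLE_TAGS - ansible tags forwarded to the cluster playbook; when set,
#                the /etc/hosts bootstrap playbook is skipped (see the
#                kube-master definition below)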
$num_nodes = (ENV['NUM_NODES'] || 2).to_i
ansible_tags = ENV['ANSIBLE_TAGS']
VAGRANTFILE_API_VERSION = "2"
# The openstack provider works best with recent Vagrant versions.
Vagrant.require_version ">= 1.7"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # By default, Vagrant 1.7+ automatically inserts a different
  # insecure keypair for each new VM created. The easiest way
  # to use the same keypair for all the machines is to disable
  # this feature and rely on the legacy insecure key.
  config.ssh.insert_key = false

  # This explicitly sets the order that vagrant will use by default if no --provider given
  config.vm.provider "openstack"
  config.vm.provider "libvirt"
  config.vm.provider "virtualbox"
  def set_openstack(os, config, n)
    # common config
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/cloudbau/vagrant-openstack-plugin/raw/master/dummy.box"
    # Don't fail if openstack_config.yml doesn't exist (which is fine
    # if we are using a different provisioner).
    __filename = File.join(File.dirname(__FILE__), "openstack_config.yml")
    if File.exist?(__filename)
      _config = YAML.load(File.open(__filename, File::RDONLY).read)
    else
      _config = Hash.new("")
      _config['os_security_groups'] = [] # default to no extra security groups
    end
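    # A sample openstack_config.yml might look like this (all values are
    # placeholders for illustration; fill in your own tenant's details):
    #   os_username: myuser
    #   os_password: mypassword
    #   os_tenant: mytenant
    #   os_ssh_key_name: mykeypair
    #   os_auth_url: http://keystone.example.com:5000/v2.0
    #   os_region_name: RegionOne
    #   os_floating_ip_pool: public
    #   os_flavor: m1.large
    #   os_image: CentOS-7
    #   os_security_groups: [default]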
    config.ssh.username = "fedora"
    config.ssh.private_key_path = "~/.ssh/id_rsa"
    config.vm.boot_timeout = 60*10

    ### The parameters below need to be set for your openstack instance.
    os.username = _config['os_username']
    os.password = _config['os_password']
    os.tenant_name = _config['os_tenant']
    os.keypair_name = _config['os_ssh_key_name']
    os.openstack_auth_url = _config['os_auth_url']
    os.region = _config['os_region_name']
    os.floating_ip_pool = _config['os_floating_ip_pool']
    os.flavor = _config['os_flavor']
    os.image = _config['os_image']
    os.security_groups = _config['os_security_groups']
    os.server_name = n.vm.hostname
  end
  def set_vbox(vb, config)
    config.vm.box = "chef/centos-7.0"
    config.vm.network "private_network", type: "dhcp"
    vb.gui = false
    vb.memory = 2048
    vb.cpus = 2
    # Use faster paravirtualized networking
    vb.customize ["modifyvm", :id, "--nictype1", "virtio"]
    vb.customize ["modifyvm", :id, "--nictype2", "virtio"]
  end
  def set_libvirt(lv, config)
    config.vm.box = "kube-centos-7"
    config.vm.box_url = "http://cloud.centos.org/centos/7/vagrant/x86_64/images/CentOS-7.LibVirt.box"
    lv.memory = 2048
    lv.cpus = 2
    lv.nested = true
    lv.volume_cache = 'none'
  end
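  # Register the per-provider settings on a machine definition. Which set
  # actually applies is decided by vagrant's provider selection (the default
  # order declared above, or an explicit --provider on the command line).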
  def set_provider(n)
    n.vm.provider :openstack do |os, override|
      set_openstack(os, override, n)
    end
    n.vm.provider :virtualbox do |vb, override|
      set_vbox(vb, override)
    end
    n.vm.provider :libvirt do |lv, override|
      set_libvirt(lv, override)
    end
  end
  config.vm.synced_folder ".", "/vagrant", disabled: true

  nodes = Array.new()
  $num_nodes.times do |i|
    # multi vm config
    name = "kube-node-#{i+1}"
    nodes.push(name)
    config.vm.define "#{name}" do |n|
      n.vm.hostname = name
      set_provider(n)
    end
  end
  # This is how we create the ansible inventory; see it in .vagrant.
  # If you want to debug, run 'VAGRANT_LOG=info vagrant up' and you'll
  # see exactly how the cluster comes up via the ansible inventory.
  groups = {
    "etcd" => ["kube-master"],
    "masters" => ["kube-master"],
    "nodes" => nodes,
    "all_groups:children" => ["etcd", "masters", "nodes"]
  }
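  # With the default NUM_NODES=2, the generated inventory is roughly:
  #   [etcd]    kube-master
  #   [masters] kube-master
  #   [nodes]   kube-node-1 kube-node-2
  # plus an all_groups parent containing the three groups above.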
config.vm.define "kube-master" do |n|
name = "kube-master"
n.vm.hostname = name
set_provider(n)
if ansible_tags.nil?
# This set up the vagrant hosts before we run the main playbook
# Today this just creates /etc/hosts so machines can talk via their
# 'internal' IPs instead of the openstack public ip.
n.vm.provision :ansible do |ansible|
ansible.groups = groups
ansible.playbook = "./vagrant-ansible.yml"
ansible.limit = "all" #otherwise the metadata wont be there for ipv4?
end
end
# This sets up both flannel and kube.
n.vm.provision :ansible do |ansible|
ansible.groups = groups
ansible.playbook = "../cluster.yml"
ansible.limit = "all" #otherwise the metadata wont be there for ipv4?
ansible.tags = ansible_tags
end
end
end