Merge branch 'master' into jnowak/sol_103

This commit is contained in:
Jedrzej Nowak 2015-09-03 09:51:20 +02:00
commit 9291303ce6
73 changed files with 3072 additions and 194 deletions

5
.gitignore vendored
View File

@ -26,3 +26,8 @@ celery*.log
*.dot
*.png
resources_compiled.py
# bootstrap
bootstrap/trusty64
bootstrap/solar-master.box
vagrant-settings.yml

72
Vagrantfile vendored
View File

@ -1,42 +1,45 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'yaml'
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
SLAVES_COUNT = 3
init_script = <<SCRIPT
apt-get update
apt-get -y install python-pip python-dev
pip install ansible
ansible-playbook -i "localhost," -c local /vagrant/main.yml /vagrant/docker.yml
SCRIPT
# configs, custom updates _defaults
defaults_cfg = YAML.load_file('vagrant-settings.yml_defaults')
if File.exist?('vagrant-settings.yml')
custom_cfg = YAML.load_file('vagrant-settings.yml')
cfg = defaults_cfg.merge(custom_cfg)
else
cfg = defaults_cfg
end
slave_script = <<SCRIPT
apt-get update
apt-get upgrade
apt-get dist-upgrade
apt-get -y install python-pip python-dev
pip install ansible
ansible-playbook -i "localhost," -c local /vagrant/main.yml /vagrant/docker.yml /vagrant/slave.yml /vagrant/slave_cinder.yml
SCRIPT
SLAVES_COUNT = cfg["slaves_count"]
SLAVES_RAM = cfg["slaves_ram"]
MASTER_RAM = cfg["master_ram"]
master_celery = <<SCRIPT
ansible-playbook -i "localhost," -c local /vagrant/celery.yml --skip-tags slave
SCRIPT
def ansible_playbook_command(filename, args=[])
"ansible-playbook -v -i \"localhost,\" -c local /vagrant/bootstrap/playbooks/#{filename} #{args.join ' '}"
end
slave_celery = <<SCRIPT
ansible-playbook -i "localhost," -c local /vagrant/celery.yml --skip-tags master
SCRIPT
solar_script = ansible_playbook_command("solar.yml")
slave_script = ansible_playbook_command("custom-configs.yml", ["-e", "master_ip=10.0.0.2"])
master_celery = ansible_playbook_command("celery.yml", ["--skip-tags", "slave"])
slave_celery = ansible_playbook_command("celery.yml", ["--skip-tags", "master"])
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
#config.vm.box = "deb/jessie-amd64"
#config.vm.box = "rustyrobot/deb-jessie-amd64"
config.vm.box = "ubuntu/trusty64"
config.vm.define "solar-dev", primary: true do |config|
config.vm.provision "shell", inline: init_script, privileged: true
#config.vm.box = "deb/jessie-amd64"
#config.vm.box = "rustyrobot/deb-jessie-amd64"
#config.vm.box = "ubuntu/trusty64"
config.vm.box = "cgenie/solar-master"
config.vm.provision "shell", inline: solar_script, privileged: true
config.vm.provision "shell", inline: master_celery, privileged: true
config.vm.provision "file", source: "~/.vagrant.d/insecure_private_key", destination: "/vagrant/tmp/keys/ssh_private"
config.vm.provision "file", source: "ansible.cfg", destination: "/home/vagrant/.ansible.cfg"
@ -44,7 +47,11 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.host_name = "solar-dev"
config.vm.provider :virtualbox do |v|
v.customize ["modifyvm", :id, "--memory", 1024]
v.customize [
"modifyvm", :id,
"--memory", MASTER_RAM,
"--paravirtprovider", "kvm" # for linux guest
]
v.name = "solar-dev"
end
end
@ -53,14 +60,21 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
index = i + 1
ip_index = i + 3
config.vm.define "solar-dev#{index}" do |config|
config.vm.provision "shell", inline: init_script, privileged: true
# standard box with all stuff preinstalled
config.vm.box = "cgenie/solar-master"
config.vm.provision "shell", inline: slave_script, privileged: true
config.vm.provision "shell", inline: solar_script, privileged: true
config.vm.provision "shell", inline: slave_celery, privileged: true
config.vm.network "private_network", ip: "10.0.0.#{ip_index}"
config.vm.host_name = "solar-dev#{index}"
config.vm.provider :virtualbox do |v|
v.customize ["modifyvm", :id, "--memory", 1024]
v.customize [
"modifyvm", :id,
"--memory", SLAVES_RAM,
"--paravirtprovider", "kvm" # for linux guest
]
v.name = "solar-dev#{index}"
end
end

25
bootstrap/README.md Normal file
View File

@ -0,0 +1,25 @@
# Solar image building
In `bootstrap/trusty64` directory there are `box.ovf` and `box-disk1.vmdk`
files from the `trusty64` Vagrant box (normally found in
`~/.vagrant.d/boxes/trusty64/0/virtualbox`).
To build, install Packer (https://www.packer.io/):
```
cd bootstrap
packer build solar-master.json
cp solar-master.box ../
cd ..
vagrant up
```
If Vagrant throws error about `vboxsf` try this:
```
vagrant plugin install vagrant-vbguest
```
(see https://github.com/shiguredo/packer-templates/issues/16).
If you're rebuilding the same box, make sure Vagrant reimports it:
```
vagrant box remove solar-master
```

2
bootstrap/ansible.cfg Normal file
View File

@ -0,0 +1,2 @@
[defaults]
host_key_checking = False

View File

@ -0,0 +1,8 @@
---
- hosts: all
sudo: yes
tasks:
#- shell: vagrant init ubuntu/trusty64
- shell: /usr/local/bin/packer build solar.json -var 'is_master=true'

12
bootstrap/packer.yml Normal file
View File

@ -0,0 +1,12 @@
---
- hosts: all
sudo: yes
tasks:
- shell: wget 'https://dl.bintray.com/mitchellh/packer/packer_0.8.2_linux_amd64.zip' -O /tmp/packer-0.8.2.zip
args:
creates: /tmp/packer-0.8.2.zip
- unarchive:
src: /tmp/packer-0.8.2.zip
dest: /usr/local/bin
copy: no

View File

@ -0,0 +1,12 @@
---
- name: Main build script
hosts: all
sudo: yes
tasks:
- include: tasks/base.yml
- include: tasks/puppet.yml
- include: tasks/docker.yml
#- include: celery.yml tags=['master'] celery_dir=/var/run/celery
- include: tasks/cinder.yml
- include: tasks/cloud_archive.yml

View File

@ -0,0 +1,21 @@
---
- name: Solar Celery config
hosts: all
sudo: yes
vars:
celery_dir: /var/run/celery
tasks:
- shell: mkdir -p {{ celery_dir }}
- shell: pip install celery
- shell: hostname
register: hostname
- shell: celery multi kill 2
chdir={{ celery_dir }}
tags: [stop]
- shell: celery multi start 2 -A solar.orchestration.runner -Q:1 scheduler,system_log -Q:2 celery,{{ hostname.stdout }}
chdir={{ celery_dir }}
tags: [master]
- shell: celery multi start 1 -A solar.orchestration.runner -Q:1 {{ hostname.stdout }}
chdir={{ celery_dir }}
tags: [slave]

View File

@ -0,0 +1,8 @@
---
- name: Custom Solar configs
hosts: all
sudo: yes
tasks:
- lineinfile: line='slaveof {{ master_ip }} 6379' dest=/etc/redis/redis.conf
- service: name=redis-server state=restarted

View File

@ -0,0 +1,15 @@
:backends:
- redis
#- yaml
#- json
:yaml:
:datadir: /etc/puppet/hieradata
:json:
:datadir: /etc/puppet/hieradata
:redis:
:port: 6379
:db: 0
:host: localhost
:deserialize: :json
:hierarchy:
- resource

View File

@ -0,0 +1,10 @@
#!/bin/sh
# TODO: maybe this is better:
# http://docs.ansible.com/ansible/intro_installation.html#latest-releases-via-apt-ubuntu
apt-get remove -f python-pip
sudo apt-get install -y python-setuptools
sudo easy_install pip
sudo pip install -U pip
sudo pip install ansible

View File

@ -0,0 +1,7 @@
---
- hosts: all
sudo: yes
tasks:
# Setup development env for solar
- shell: python setup.py develop chdir=/vagrant/solar

View File

@ -0,0 +1,56 @@
---
- shell: apt-get update
- name: Base packages
apt: name={{ item }} state=present
with_items:
- git
- python-mock
- python-keystoneclient
- python-mysqldb
- python-setuptools
- ruby-dev
- unzip
# Redis
- redis-server
# Graph drawing
- python-pygraphviz
# Other tools
- htop
- jq
- tmux
- vim
# Dev
- ipython
- python-pudb
# Required by packer
- build-essential
# PIP
#- apt: name=python-pip state=absent
#- shell: easy_install pip
#- shell: pip install -U pip
#- shell: pip install -U setuptools
- shell: pip install httpie
- shell: pip install docker-py==1.1.0
# Redis
- shell: pip install redis
- lineinfile: dest=/etc/redis/redis.conf regexp='^bind ' line='bind 0.0.0.0'
- service: name=redis-server state=restarted
# Ubuntu OpenStack packages
#- apt: name=ubuntu-cloud-keyring state=present
#- shell: echo "deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/kilo main" > /etc/apt/sources.list.d/cloudarchive-kilo.list
#- shell: echo "deb http://osci-mirror-poz.infra.mirantis.net/pkgs/ubuntu-2015-06-25-194717 trusty-updates main" > /etc/apt/sources.list.d/fuel-kilo.list
#- shell: echo "deb http://osci-mirror-poz.infra.mirantis.net/pkgs/ubuntu-latest trusty main" > /etc/apt/sources.list.d/fuel-kilo.list
# cloudarchive key
#- shell: apt-key adv --recv-key --keyserver keyserver.ubuntu.com 5EDB1B62EC4926EA
# some other keys
#- shell: apt-key adv --recv-key --keyserver keyserver.ubuntu.com 9D6D8F6BC857C906
#- shell: apt-key adv --recv-key --keyserver keyserver.ubuntu.com 7638D0442B90D010
# mirantis poznan
#- shell: apt-key adv --recv-key --keyserver keyserver.ubuntu.com 40976EAF437D05B5
#- shell: apt-key adv --recv-key --keyserver keyserver.ubuntu.com 3B4FE6ACC0B21F32
#- shell: apt-get update

View File

@ -0,0 +1,15 @@
---
- apt: name=lvm2 state=present
- command: sudo truncate -s 10G /root/cinder.img creates=/root/cinder.img
- shell: sudo losetup -a|grep cinder
register: loop_created
ignore_errors: True
- command: sudo losetup /dev/loop0 /root/cinder.img
when: loop_created|failed
# retries: 5
# delay: 1
- lvg: vg=cinder-volumes pvs=/dev/loop0
when: loop_created|failed
# retries: 5
# delay: 1

View File

@ -0,0 +1,7 @@
---
- shell: apt-get update
- shell: apt-get -y upgrade
- shell: add-apt-repository -y cloud-archive:juno
- shell: apt-get update
- shell: apt-get update --fix-missing

View File

@ -0,0 +1,17 @@
---
- shell: docker --version
ignore_errors: true
register: docker_version
# This script is completely broken, it has so many sleeps...
- shell: curl -sSL https://get.docker.com/ | sudo sh
when: docker_version | failed
# Here's a raw paste of what the above script really does for Ubuntu
#- shell: apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
#- shell: mkdir -p /etc/apt/sources.list.d
#- shell: echo deb https://apt.dockerproject.org/repo ubuntu-trusty main > /etc/apt/sources.list.d/docker.list
# args:
# creates: /etc/apt/sources.list.d/docker.list
#- shell: apt-get update
#- shell: apt-get install -y -q docker-engine

View File

@ -0,0 +1,24 @@
---
# Puppet
- shell: wget https://apt.puppetlabs.com/puppetlabs-release-trusty.deb -O /root/puppetlabs-release-trusty.deb
- shell: dpkg -i /root/puppetlabs-release-trusty.deb
- shell: apt-get update
- apt: name=puppet state=present
- git: repo=https://github.com/CGenie/hiera-redis dest=/root/hiera-redis
- shell: gem build hiera-redis.gemspec && gem install hiera-redis-3.0.0.gem chdir=/root/hiera-redis
- template: src=files/hiera.yaml dest=/etc/puppet/hiera.yaml
- file: path=/etc/puppet/hieradata state=directory
# Make paths puppet 4 compatible
- file: path=/etc/puppetlabs/code/ state=directory
- file: src=/etc/puppet/hiera.yaml dest=/etc/puppetlabs/code/hiera.yaml state=link
- file: path=/var/tmp/puppet/modules state=directory owner=puppet
- file: path=/var/tmp/puppet/Puppetfile state=touch owner=puppet
- file: path=/etc/puppet/modules state=absent
- file: path=/etc/puppetlabs/code/modules state=absent
- file: path=/etc/puppetlabs/code/hieradata state=absent
- file: src=/var/tmp/puppet/modules dest=/etc/puppet/modules state=link
- file: src=/var/tmp/puppet/modules dest=/etc/puppetlabs/code/modules state=link
- file: src=/etc/puppet/hieradata dest=/etc/puppetlabs/code/hieradata state=link
- shell: gem install librarian-puppet --no-ri --no-rdoc

View File

@ -0,0 +1,47 @@
{
"variables": {
"ansible_config_path": "/etc/ansible",
"ansible_config_file": "ansible.cfg",
"ansible_log_file": "/tmp/ansible.log"
},
"builders": [{
"type": "virtualbox-ovf",
"source_path": "trusty64/box.ovf",
"ssh_username": "vagrant",
"ssh_password": "vagrant",
"shutdown_command": "echo 'vagrant' | sudo -S shutdown -P now"
}],
"provisioners": [
{
"type": "shell",
"inline": ["sudo mkdir -p {{ user `ansible_config_path` }}"]
},
{
"type": "file",
"source": "ansible.cfg",
"destination": "/tmp/ansible.cfg"
},
{
"type": "shell",
"inline": ["sudo mv /tmp/ansible.cfg {{ user `ansible_config_path` }}/{{ user `ansible_config_file` }}"]
},
{
"type": "shell",
"inline": [
"sudo echo 'log_path = {{ user `ansible_log_file` }}' >> {{ user `ansible_config_path` }}/{{ user `ansible_config_file` }}"
]
},
{
"type": "shell",
"script": "playbooks/files/ubuntu-ansible.sh"
}, {
"type": "ansible-local",
"playbook_dir": "playbooks",
"playbook_file": "playbooks/build-main.yml",
"extra_arguments": ["--verbose"]
}],
"post-processors": [{
"type": "vagrant",
"output": "solar-master.box"
}]
}

109
example-bootstrap.py Normal file
View File

@ -0,0 +1,109 @@
import click
import sys
import time
from solar.core import actions
from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import virtual_resource as vr
from solar import errors
from solar.interfaces.db import get_db
GIT_PUPPET_LIBS_URL = 'https://github.com/CGenie/puppet-libs-resource'
# TODO
# Resource for repository OR puppet apt-module in run.pp
# add-apt-repository cloud-archive:juno
# To discuss: install stuff in Docker container
# NOTE
# No copy of manifests, pull from upstream (implemented in the puppet handler)
# Official puppet manifests, not fuel-library
db = get_db()
@click.group()
def main():
pass
def setup_resources():
db.clear()
signals.Connections.clear()
node3 = vr.create('node3', 'resources/ro_node/', {
'ip': '10.0.0.5',
'ssh_key': '/vagrant/.vagrant/machines/solar-dev3/virtualbox/private_key',
'ssh_user': 'vagrant'
})[0]
solar_bootstrap3 = vr.create('solar_bootstrap3', 'resources/solar_bootstrap', {'master_ip': '10.0.0.2'})[0]
signals.connect(node3, solar_bootstrap3)
has_errors = False
for r in locals().values():
if not isinstance(r, resource.Resource):
continue
print 'Validating {}'.format(r.name)
errors = validation.validate_resource(r)
if errors:
has_errors = True
print 'ERROR: %s: %s' % (r.name, errors)
if has_errors:
sys.exit(1)
resources_to_run = [
'solar_bootstrap3',
]
@click.command()
def deploy():
setup_resources()
# run
resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource))
resources = {r.name: r for r in resources}
for name in resources_to_run:
try:
actions.resource_action(resources[name], 'run')
except errors.SolarError as e:
print 'WARNING: %s' % str(e)
raise
time.sleep(10)
@click.command()
def undeploy():
resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource))
resources = {r.name: r for r in resources}
for name in reversed(resources_to_run):
try:
actions.resource_action(resources[name], 'remove')
except errors.SolarError as e:
print 'WARNING: %s' % str(e)
db.clear()
signals.Connections.clear()
main.add_command(deploy)
main.add_command(undeploy)
if __name__ == '__main__':
main()

160
example-puppet.py Normal file → Executable file
View File

@ -1,3 +1,5 @@
#!/usr/bin/env python
import click
import sys
import time
@ -160,9 +162,29 @@ def setup_resources():
signals.connect(admin_user, openrc, {'user_name': 'user_name','user_password':'password', 'tenant_name': 'tenant'})
# NEUTRON
# TODO: vhost cannot be specified in neutron Puppet manifests so this user has to be admin anyways
neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', {})[0]
# Deploy chain neutron -> (plugins) -> neutron_server -> ( agents )
neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', {
'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin'
})[0]
signals.connect(node1, neutron_puppet)
signals.connect(rabbitmq_service1, neutron_puppet, {
'ip': 'rabbit_host',
'port': 'rabbit_port'
})
signals.connect(openstack_rabbitmq_user, neutron_puppet, {
'user_name': 'rabbit_user',
'password': 'rabbit_password'})
signals.connect(openstack_vhost, neutron_puppet, {
'vhost_name': 'rabbit_virtual_host'})
# NEUTRON API (SERVER)
neutron_server_puppet = vr.create('neutron_server_puppet', 'resources/neutron_server_puppet', {
'sync_db': True,
})[0]
neutron_db = vr.create('neutron_db', 'resources/mariadb_db/', {
'db_name': 'neutron_db', 'login_user': 'root'})[0]
neutron_db_user = vr.create('neutron_db_user', 'resources/mariadb_user/', {
'user_name': 'neutron', 'user_password': 'neutron', 'login_user': 'root'})[0]
neutron_keystone_user = vr.create('neutron_keystone_user', 'resources/keystone_user', {
'user_name': 'neutron',
'user_password': 'neutron'
@ -179,24 +201,29 @@ def setup_resources():
'type': 'network'
})[0]
signals.connect(node1, neutron_puppet)
signals.connect(rabbitmq_service1, neutron_puppet, {
'ip': 'rabbitmq_host',
'port': 'rabbitmq_port'
signals.connect(node1, neutron_db)
signals.connect(node1, neutron_db_user)
signals.connect(mariadb_service1, neutron_db, {
'port': 'login_port',
'root_password': 'login_password',
'root_user': 'login_user',
'ip' : 'db_host'})
signals.connect(mariadb_service1, neutron_db_user, {'port': 'login_port', 'root_password': 'login_password'})
signals.connect(neutron_db, neutron_db_user, {'db_name', 'db_host'})
signals.connect(neutron_db_user, neutron_server_puppet, {
'user_name':'db_user',
'db_name':'db_name',
'user_password':'db_password',
'db_host' : 'db_host'})
signals.connect(node1, neutron_server_puppet)
signals.connect(admin_user, neutron_server_puppet, {
'user_name': 'auth_user',
'user_password': 'auth_password',
'tenant_name': 'auth_tenant'
})
signals.connect(openstack_rabbitmq_user, neutron_puppet, {
'user_name': 'rabbitmq_user',
'password': 'rabbitmq_password'})
signals.connect(openstack_vhost, neutron_puppet, {
'vhost_name': 'rabbitmq_virtual_host'})
signals.connect(admin_user, neutron_puppet, {
'user_name': 'keystone_user',
'user_password': 'keystone_password',
'tenant_name': 'keystone_tenant'
})
signals.connect(keystone_puppet, neutron_puppet, {
'ip': 'keystone_host',
'port': 'keystone_port'
signals.connect(keystone_puppet, neutron_server_puppet, {
'ip': 'auth_host',
'port': 'auth_port'
})
signals.connect(services_tenant, neutron_keystone_user)
signals.connect(neutron_keystone_user, neutron_keystone_role)
@ -209,9 +236,60 @@ def setup_resources():
})
signals.connect(neutron_puppet, neutron_keystone_service_endpoint, {
'ip': ['admin_ip', 'internal_ip', 'public_ip'],
'port': ['admin_port', 'internal_port', 'public_port'],
'bind_port': ['admin_port', 'internal_port', 'public_port'],
})
# NEUTRON ML2 PLUGIN & ML2-OVS AGENT WITH GRE
neutron_plugins_ml2 = vr.create('neutron_plugins_ml2', 'resources/neutron_plugins_ml2_puppet', {})[0]
signals.connect(node1, neutron_plugins_ml2)
neutron_agents_ml2 = vr.create('neutron_agents_ml2', 'resources/neutron_agents_ml2_ovs_puppet', {
# TODO(bogdando) these should come from the node network resource
'enable_tunneling': True,
'tunnel_types': ['gre'],
'local_ip': '10.1.0.13' # should be the IP addr of the br-mesh int.
})[0]
signals.connect(node1, neutron_agents_ml2)
# NEUTRON DHCP, L3, metadata agents
neutron_agents_dhcp = vr.create('neutron_agents_dhcp', 'resources/neutron_agents_dhcp_puppet', {})[0]
signals.connect(node1, neutron_agents_dhcp)
neutron_agents_l3 = vr.create('neutron_agents_l3', 'resources/neutron_agents_l3_puppet', {
# TODO(bogdando) these should come from the node network resource
'metadata_port': 8775,
'external_network_bridge': 'br-floating',
})[0]
signals.connect(node1, neutron_agents_l3)
neutron_agents_metadata = vr.create('neutron_agents_metadata', 'resources/neutron_agents_metadata_puppet', {
'shared_secret': 'secret',
})[0]
signals.connect(node1, neutron_agents_metadata)
signals.connect(neutron_server_puppet, neutron_agents_metadata, {
'auth_host', 'auth_port', 'auth_password',
'auth_tenant', 'auth_user',
})
# NEUTRON FOR COMPUTE (node2)
# Deploy chain neutron -> (plugins) -> ( agents )
neutron_puppet2 = vr.create('neutron_puppet2', 'resources/neutron_puppet', {})[0]
signals.connect(node2, neutron_puppet2)
signals.connect(neutron_puppet, neutron_puppet2, {
'rabbit_host', 'rabbit_port',
'rabbit_user', 'rabbit_password',
'rabbit_virtual_host',
'package_ensure', 'core_plugin',
})
# NEUTRON OVS PLUGIN & AGENT WITH GRE FOR COMPUTE (node2)
neutron_plugins_ml22 = vr.create('neutron_plugins_ml22', 'resources/neutron_plugins_ml2_puppet', {})[0]
signals.connect(node2, neutron_plugins_ml22)
neutron_agents_ml22 = vr.create('neutron_agents_ml22', 'resources/neutron_agents_ml2_ovs_puppet', {
# TODO(bogdando) these should come from the node network resource
'enable_tunneling': True,
'tunnel_types': ['gre'],
'local_ip': '10.1.0.14' # Should be the IP addr of the br-mesh int.
})[0]
signals.connect(node2, neutron_agents_ml22)
# CINDER
cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {})[0]
cinder_db = vr.create('cinder_db', 'resources/mariadb_db/', {
@ -229,7 +307,7 @@ def setup_resources():
'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s',
'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s',
'description': 'OpenStack Block Storage Service', 'type': 'volume'})[0]
'description': 'OpenStack Block Storage Service', 'type': 'volumev2'})[0]
signals.connect(node1, cinder_puppet)
signals.connect(node1, cinder_db)
@ -286,6 +364,7 @@ def setup_resources():
signals.connect(node1, cinder_volume_puppet)
signals.connect(cinder_puppet, cinder_volume_puppet)
evapi.add_react(cinder_puppet.name, cinder_volume_puppet.name, actions=('update',))
# NOVA
nova_puppet = vr.create('nova_puppet', 'resources/nova_puppet', {})[0]
nova_db = vr.create('nova_db', 'resources/mariadb_db/', {
@ -362,6 +441,7 @@ def setup_resources():
'keystone_password': 'admin_password',
'keystone_host': 'auth_host',
'keystone_port': 'auth_port'})
signals.connect(nova_api_puppet, neutron_agents_metadata, {'ip': 'metadata_ip'})
# NOVA CONDUCTOR
nova_conductor_puppet = vr.create('nova_conductor_puppet', 'resources/nova_conductor_puppet', {})[0]
@ -391,8 +471,20 @@ def setup_resources():
# NOTE(bogdando): changes nova config, so should notify nova compute service
nova_compute_libvirt_puppet = vr.create('nova_compute_libvirt_puppet', 'resources/nova_compute_libvirt_puppet', {})[0]
signals.connect(node2, nova_compute_libvirt_puppet)
# compute configuration for neutron, use http auth/endpoint protocols, keystone v2 auth hardcoded for the resource
nova_neutron_puppet = vr.create('nova_neutron_puppet', 'resources/nova_neutron_puppet', {})[0]
signals.connect(node2, nova_neutron_puppet)
signals.connect(neutron_server_puppet, nova_neutron_puppet, {
'auth_password': 'neutron_admin_password',
'auth_user': 'neutron_admin_username',
'auth_type': 'neutron_auth_strategy',
'auth_host': 'auth_host', 'auth_port': 'auth_port',
'auth_protocol': 'auth_protocol',
})
signals.connect(neutron_keystone_service_endpoint, nova_neutron_puppet, {
'internal_ip':'neutron_endpoint_host',
'internal_port':'neutron_endpoint_port',
})
# signals.connect(keystone_puppet, nova_network_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'})
# signals.connect(keystone_puppet, nova_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_port', 'admin_token': 'admin_token'})
@ -415,7 +507,7 @@ def setup_resources():
'adminurl': 'http://{{admin_ip}}:{{admin_port}}',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}',
'publicurl': 'http://{{public_ip}}:{{public_port}}',
'description': 'OpenStack Image Service', 'type': 'volume'})[0]
'description': 'OpenStack Image Service', 'type': 'image'})[0]
signals.connect(node1, glance_api_puppet)
signals.connect(node1, glance_db)
@ -510,10 +602,18 @@ resources_to_run = [
'keystone_service_endpoint',
'services_tenant',
'neutron_db',
'neutron_db_user',
'neutron_keystone_user',
'neutron_keystone_role',
'neutron_puppet',
'neutron_keystone_service_endpoint',
'neutron_plugins_ml2',
'neutron_server_puppet',
'neutron_agents_ml2',
'neutron_agents_dhcp',
'neutron_agents_l3',
'neutron_agents_metadata',
'cinder_db',
'cinder_db_user',
@ -535,11 +635,6 @@ resources_to_run = [
'nova_api_puppet',
'nova_conductor_puppet',
'nova_puppet2',
'nova_compute_libvirt_puppet',
'nova_neutron_puppet',
'nova_compute_puppet',
'glance_db',
'glance_db_user',
'glance_keystone_user',
@ -547,10 +642,17 @@ resources_to_run = [
'glance_keystone_service_endpoint',
'glance_api_puppet',
'glance_registry_puppet',
'nova_puppet2',
'nova_compute_libvirt_puppet',
'nova_neutron_puppet',
'nova_compute_puppet',
'neutron_puppet2',
'neutron_plugins_ml22',
'neutron_agents_ml22',
]
@click.command()
def deploy():
setup_resources()

View File

@ -170,7 +170,7 @@ def setup_haproxies():
for single_hps, single_hpc in zip(hps, hpc):
signals.connect(single_hpc, single_hps, {'listen_ports': 'ports'},
events=None)
events=False)
# assign haproxy services to each node
@ -204,11 +204,12 @@ def setup_haproxies():
events = []
for node, single_hps, single_hpc in zip(nodes, hps, hpc):
r = React(node.name, 'run', 'success', single_hps.name, 'install')
d = Dep(single_hps.name, 'install', 'success', single_hpc.name, 'run')
e1 = React(single_hpc.name, 'run', 'success', single_hps.name, 'run')
e2 = React(single_hpc.name, 'update', 'success', single_hps.name, 'update')
events.extend([r, d, e1, e2])
# r = React(node.name, 'run', 'success', single_hps.name, 'install')
d = Dep(single_hps.name, 'run', 'success', single_hpc.name, 'run')
e1 = React(single_hpc.name, 'run', 'success', single_hps.name, 'apply_config')
e2 = React(single_hpc.name, 'update', 'success', single_hps.name, 'apply_config')
# events.extend([r, d, e1, e2])
events.extend([d, e1, e2])
for event in events:
add_event(event)
@ -223,7 +224,6 @@ def main():
def deploy():
setup_riak()
@click.command()
def add_haproxies():
setup_haproxies()

View File

@ -3,29 +3,26 @@
- hosts: all
sudo: yes
tasks:
- apt: name=git state=present
- apt: name=python-mock state=present
- apt: name=python-keystoneclient state=present
# PIP
- apt: name=python-pip state=absent
- apt: name=python-six state=absent
- shell: easy_install pip
- shell: pip install -U pip
- shell: pip install -U setuptools
- shell: pip install httpie
#- apt: name=python-virtualenv state=present
#- apt: name=virtualenvwrapper state=present
- apt: name=python-mock state=present
- apt: name=ipython state=present
- apt: name=python-pudb state=present
#- apt: name=python-pip state=present
- shell: pip install docker-py==1.1.0
- apt: name=python-keystoneclient state=present
# Redis
- apt: name=redis-server state=present
#- apt: name=python-redis state=present
- shell: pip install redis
- lineinfile: dest=/etc/redis/redis.conf regexp='^bind ' line='bind 0.0.0.0'
- service: name=redis-server state=restarted
- apt: name=git state=present
# Puppet
- shell: wget https://apt.puppetlabs.com/puppetlabs-release-trusty.deb -O /root/puppetlabs-release-trusty.deb
- shell: dpkg -i /root/puppetlabs-release-trusty.deb
- shell: apt-get update
@ -50,11 +47,16 @@
- shell: gem install librarian-puppet --no-ri --no-rdoc
# Setup additional development tools
- apt: name=vim state=present
- apt: name=tmux state=present
- apt: name=htop state=present
- apt: name=python-mysqldb state=present
- apt: name=jq state=present
- name: Additional development tools
apt: name={{ item }} state=present
with_items:
- vim
- tmux
- htop
- python-mysqldb
- jq
- ipython
- python-pudb
# Graph drawing
#- apt: name=python-matplotlib state=present
@ -64,6 +66,7 @@
# Setup development env for solar
- shell: python setup.py develop chdir=/vagrant/solar
# Ubuntu OpenStack packages
#- apt: name=ubuntu-cloud-keyring state=present
#- shell: echo "deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/kilo main" > /etc/apt/sources.list.d/cloudarchive-kilo.list
#- shell: echo "deb http://osci-mirror-poz.infra.mirantis.net/pkgs/ubuntu-2015-06-25-194717 trusty-updates main" > /etc/apt/sources.list.d/fuel-kilo.list

View File

@ -0,0 +1,4 @@
# Apache puppet resource
This class installs Apache and manages apache service.
Defaults provided for Debian OS family.

View File

@ -0,0 +1,5 @@
class {'apache':
service_enable => false,
service_ensure => 'stopped',
package_ensure => 'absent',
}

View File

@ -0,0 +1,120 @@
$resource = hiera($::resource_name)
$apache_name = $resource['input']['apache_name']['value']
$service_name = $resource['input']['service_name']['value']
$default_mods = $resource['input']['default_mods']['value']
$default_vhost = $resource['input']['default_vhost']['value']
$default_charset = $resource['input']['default_charset']['value']
$default_confd_files = $resource['input']['default_confd_files']['value']
$default_ssl_vhost = $resource['input']['default_ssl_vhost']['value']
$default_ssl_cert = $resource['input']['default_ssl_cert']['value']
$default_ssl_key = $resource['input']['default_ssl_key']['value']
$default_ssl_chain = $resource['input']['default_ssl_chain']['value']
$default_ssl_ca = $resource['input']['default_ssl_ca']['value']
$default_ssl_crl_path = $resource['input']['default_ssl_crl_path']['value']
$default_ssl_crl = $resource['input']['default_ssl_crl']['value']
$default_ssl_crl_check = $resource['input']['default_ssl_crl_check']['value']
$default_type = $resource['input']['default_type']['value']
$ip = $resource['input']['ip']['value']
$service_restart = $resource['input']['service_restart']['value']
$purge_configs = $resource['input']['purge_configs']['value']
$purge_vhost_dir = $resource['input']['purge_vhost_dir']['value']
$purge_vdir = $resource['input']['purge_vdir']['value']
$serveradmin = $resource['input']['serveradmin']['value']
$sendfile = $resource['input']['sendfile']['value']
# Reads apache inputs from the solar resource (via hiera) and applies the
# puppetlabs-apache class with them.
# NOTE(review): $resource is presumably assigned earlier in this manifest
# via hiera($::resource_name), as in the sibling run.pp manifests — confirm.
$error_documents = $resource['input']['error_documents']['value']
$timeout = $resource['input']['timeout']['value']
$httpd_dir = $resource['input']['httpd_dir']['value']
$server_root = $resource['input']['server_root']['value']
$conf_dir = $resource['input']['conf_dir']['value']
$confd_dir = $resource['input']['confd_dir']['value']
$vhost_dir = $resource['input']['vhost_dir']['value']
$vhost_enable_dir = $resource['input']['vhost_enable_dir']['value']
$mod_dir = $resource['input']['mod_dir']['value']
$mod_enable_dir = $resource['input']['mod_enable_dir']['value']
$mpm_module = $resource['input']['mpm_module']['value']
$lib_path = $resource['input']['lib_path']['value']
$conf_template = $resource['input']['conf_template']['value']
$servername = $resource['input']['servername']['value']
$manage_user = $resource['input']['manage_user']['value']
$manage_group = $resource['input']['manage_group']['value']
$user = $resource['input']['user']['value']
$group = $resource['input']['group']['value']
$keepalive = $resource['input']['keepalive']['value']
$keepalive_timeout = $resource['input']['keepalive_timeout']['value']
$max_keepalive_requests = $resource['input']['max_keepalive_requests']['value']
$logroot = $resource['input']['logroot']['value']
$logroot_mode = $resource['input']['logroot_mode']['value']
$log_level = $resource['input']['log_level']['value']
$log_formats = $resource['input']['log_formats']['value']
$ports_file = $resource['input']['ports_file']['value']
$docroot = $resource['input']['docroot']['value']
$apache_version = $resource['input']['apache_version']['value']
$server_tokens = $resource['input']['server_tokens']['value']
$server_signature = $resource['input']['server_signature']['value']
$trace_enable = $resource['input']['trace_enable']['value']
$allow_encoded_slashes = $resource['input']['allow_encoded_slashes']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$use_optional_includes = $resource['input']['use_optional_includes']['value']
# Service state is intentionally fixed here (enabled, managed, running)
# rather than exposed as resource inputs.
class {'apache':
  apache_name => $apache_name,
  service_name => $service_name,
  default_mods => $default_mods,
  default_vhost => $default_vhost,
  default_charset => $default_charset,
  default_confd_files => $default_confd_files,
  default_ssl_vhost => $default_ssl_vhost,
  default_ssl_cert => $default_ssl_cert,
  default_ssl_key => $default_ssl_key,
  default_ssl_chain => $default_ssl_chain,
  default_ssl_ca => $default_ssl_ca,
  default_ssl_crl_path => $default_ssl_crl_path,
  default_ssl_crl => $default_ssl_crl,
  default_ssl_crl_check => $default_ssl_crl_check,
  default_type => $default_type,
  ip => $ip,
  service_enable => true,
  service_manage => true,
  service_ensure => 'running',
  service_restart => $service_restart,
  purge_configs => $purge_configs,
  purge_vhost_dir => $purge_vhost_dir,
  purge_vdir => $purge_vdir,
  serveradmin => $serveradmin,
  sendfile => $sendfile,
  error_documents => $error_documents,
  timeout => $timeout,
  httpd_dir => $httpd_dir,
  server_root => $server_root,
  conf_dir => $conf_dir,
  confd_dir => $confd_dir,
  vhost_dir => $vhost_dir,
  vhost_enable_dir => $vhost_enable_dir,
  mod_dir => $mod_dir,
  mod_enable_dir => $mod_enable_dir,
  mpm_module => $mpm_module,
  lib_path => $lib_path,
  conf_template => $conf_template,
  servername => $servername,
  manage_user => $manage_user,
  manage_group => $manage_group,
  user => $user,
  group => $group,
  keepalive => $keepalive,
  keepalive_timeout => $keepalive_timeout,
  max_keepalive_requests => $max_keepalive_requests,
  logroot => $logroot,
  logroot_mode => $logroot_mode,
  log_level => $log_level,
  log_formats => $log_formats,
  ports_file => $ports_file,
  docroot => $docroot,
  apache_version => $apache_version,
  server_tokens => $server_tokens,
  server_signature => $server_signature,
  trace_enable => $trace_enable,
  allow_encoded_slashes => $allow_encoded_slashes,
  package_ensure => $package_ensure,
  use_optional_includes => $use_optional_includes,
}

View File

@ -0,0 +1,186 @@
# Solar resource definition for apache, backed by the puppetlabs-apache
# module (pinned below under 'git').  Each input maps 1:1 onto a class
# parameter consumed by actions/run.pp; 'str!' marks a required input.
id: apache_puppet
handler: puppet
puppet_module: apache
version: 1.0.0
input:
  apache_name:
    schema: str
    value: 'apache2'
  service_name:
    schema: str
    value: 'apache2'
  default_mods:
    schema: bool
    value: true
  default_vhost:
    schema: bool
    value: true
  default_charset:
    schema: str
    value:
  default_confd_files:
    schema: bool
    value: true
  default_ssl_vhost:
    schema: bool
    value: false
  default_ssl_cert:
    schema: str
    value: '/etc/ssl/certs/ssl-cert-snakeoil.pem'
  default_ssl_key:
    schema: str
    value: '/etc/ssl/private/ssl-cert-snakeoil.key'
  default_ssl_chain:
    schema: str
    value:
  default_ssl_ca:
    schema: str
    value:
  default_ssl_crl_path:
    schema: str
    value:
  default_ssl_crl:
    schema: str
    value:
  default_ssl_crl_check:
    schema: str
    value:
  default_type:
    schema: str
    value: 'none'
  service_restart:
    schema: str
    value: 'restart'
  purge_configs:
    schema: bool
    value: true
  purge_vhost_dir:
    schema: str
    value:
  purge_vdir:
    schema: bool
    value: false
  serveradmin:
    schema: str
    value: 'root@localhost'
  sendfile:
    schema: str
    value: 'On'
  error_documents:
    schema: bool
    value: false
  timeout:
    schema: int
    value: 120
  httpd_dir:
    schema: str
    value: '/etc/apache2'
  server_root:
    schema: str
    value: '/etc/apache2'
  conf_dir:
    schema: str
    value: '/etc/apache2'
  confd_dir:
    schema: str
    value: '/etc/apache2/conf.d'
  vhost_dir:
    schema: str
    value: '/etc/apache2/sites-available'
  vhost_enable_dir:
    schema: str
    value: '/etc/apache2/sites-enabled'
  mod_dir:
    schema: str
    value: '/etc/apache2/mods-available'
  mod_enable_dir:
    schema: str
    value: '/etc/apache2/mods-enabled'
  mpm_module:
    schema: str
    value: 'worker'
  lib_path:
    schema: str
    value: '/usr/lib/apache2/modules'
  conf_template:
    schema: str
    value: 'apache/httpd.conf.erb'
  servername:
    schema: str!
    value:
  manage_user:
    schema: bool
    value: true
  manage_group:
    schema: bool
    value: true
  user:
    schema: str
    value: 'www-data'
  group:
    schema: str
    value: 'www-data'
  keepalive:
    schema: str
    value: 'Off'
  keepalive_timeout:
    schema: int
    value: 15
  max_keepalive_requests:
    schema: int
    value: 100
  logroot:
    schema: str
    value: '/var/log/apache2'
  logroot_mode:
    schema: str
    value: '0640'
  log_level:
    schema: str
    value: 'warn'
  log_formats:
    # NOTE(review): free-form mapping schema — presumably format_name ->
    # format string, as in the apache class; confirm against the handler.
    schema: {}
    value: {}
  ports_file:
    schema: str
    value: '/etc/apache2/ports.conf'
  docroot:
    schema: str
    value: '/srv/www'
  apache_version:
    schema: str
    value: '2.4'
  server_tokens:
    schema: str
    value: 'OS'
  server_signature:
    schema: str
    value: 'On'
  trace_enable:
    schema: str
    value: 'On'
  allow_encoded_slashes:
    schema: str
    value:
  package_ensure:
    schema: str
    value: 'installed'
  use_optional_includes:
    schema: bool
    value: false
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/puppetlabs/puppetlabs-apache.git', branch: '1.5.0'}
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:
# NOTE(review): was "resources/apache"; normalized to the singular
# "resource/" prefix used by every other resource definition here.
tags: [resource/apache_service, resource/apache]

View File

@ -0,0 +1,11 @@
import requests
from solar.core.log import log
def test(resource):
    """Smoke-test the apache_puppet resource by fetching its root URL.

    Raises requests.HTTPError when apache answers with an error status,
    and requests.Timeout/ConnectionError when it is unreachable, so the
    test actually fails instead of silently discarding the response.
    """
    log.debug('Testing apache_puppet')
    # Bound the request so a hung apache cannot stall the test run forever.
    response = requests.get(
        'http://%s:%s' % (resource.args['ip'].value, 80),
        timeout=10
    )
    # Previously the response was discarded, so the test could never fail
    # on HTTP errors; surface 4xx/5xx explicitly.
    response.raise_for_status()

View File

@ -0,0 +1,22 @@
# TODO
# Renders the list of haproxy frontends/backends from the connected solar
# resources and templates haproxy.cfg onto the target host.
# NOTE(review): relies on `zip` being callable inside the template context —
# solar's renderer presumably injects it; plain Jinja2/Ansible does not.
- hosts: [{{ip}}]
  sudo: yes
  vars:
    config_dir: {src: {{ config_dir.value['src'] }}, dst: {{ config_dir.value['dst'] }}}
    haproxy_ip: {{ ip }}
    haproxy_services:
    {% for service, ports, listen_port, protocol in zip(configs.value, configs_ports.value, listen_ports.value, configs_protocols.value) %}
      - name: {{ service['emitter_attached_to'] }}
        listen_port: {{ listen_port['value'] }}
        protocol: {{ protocol['value'] }}
        servers:
        {% for server_ip, server_port in zip(service['value'], ports['value']) %}
          - name: {{ server_ip['emitter_attached_to'] }}
            ip: {{ server_ip['value'] }}
            port: {{ server_port['value'] }}
        {% endfor %}
    {% endfor %}
  tasks:
    # Ensure the staging directory and file exist before templating the
    # final config into /etc/haproxy.
    - file: path={{ config_dir.value['src'] }}/ state=directory
    - file: path={{ config_dir.value['src'] }}/haproxy.cfg state=touch
    - template: src={{ resource_dir }}/templates/haproxy.cfg dest=/etc/haproxy/haproxy.cfg

View File

@ -0,0 +1,7 @@
# Reloads haproxy so a newly templated configuration takes effect without
# dropping established connections (reload, not restart).
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - service:
        name: haproxy
        state: reloaded

View File

@ -1,7 +1,10 @@
- hosts: [{{host}}]
sudo: yes
tasks:
- service:
- apt:
name: haproxy
state: reloaded
state: present
- replace:
dest: '/etc/default/haproxy'
regexp: ENABLED=0
replace: ENABLED=1

View File

@ -0,0 +1,57 @@
# Neutron DHCP agent puppet resource
Installs and configures the Neutron DHCP service
# Parameters
https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/agents/dhcp.pp
``package_ensure``
(optional) Ensure state for package. Defaults to 'present'.
``debug``
(optional) Show debugging output in log. Defaults to false.
``state_path``
(optional) Where to store dnsmasq state files. This directory must be
writable by the user executing the agent. Defaults to '/var/lib/neutron'.
``resync_interval``
(optional) The DHCP agent will resync its state with Neutron to recover
from any transient notification or rpc errors. The interval is number of
seconds between attempts. Defaults to 30.
``interface_driver``
(optional) Defaults to 'neutron.agent.linux.interface.OVSInterfaceDriver'.
``dhcp_driver``
(optional) Defaults to 'neutron.agent.linux.dhcp.Dnsmasq'.
``root_helper``
(optional) Defaults to 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf'.
Addresses bug: https://bugs.launchpad.net/neutron/+bug/1182616
Note: This can safely be removed once the module only targets the Havana release.
``use_namespaces``
(optional) Allow overlapping IP (Must have kernel build with
CONFIG_NET_NS=y and iproute2 package that supports namespaces).
Defaults to true.
``dnsmasq_config_file``
(optional) Override the default dnsmasq settings with this file.
Defaults to undef
``dhcp_delete_namespaces``
(optional) Delete namespace after removing a dhcp server
Defaults to false.
``enable_isolated_metadata``
(optional) enable metadata support on isolated networks.
Defaults to false.
``enable_metadata_network``
(optional) Allows for serving metadata requests coming from a dedicated metadata
access network whose cidr is 169.254.169.254/16 (or larger prefix), and is
connected to a Neutron router from which the VMs send metadata request.
This option requires enable_isolated_metadata = True
Defaults to false.

View File

@ -0,0 +1,16 @@
# Disables the Neutron DHCP agent and removes its package, along with the
# base neutron package.
class { 'neutron::agents::dhcp':
  package_ensure => 'absent',
  enabled => false,
}
include neutron::params
package { 'neutron':
  ensure => 'absent',
  name => $::neutron::params::package_name,
}
# Remove external class dependency (the service is gone, so the collector
# must not require it anymore).
Service <| title == 'neutron-dhcp-service' |> {
  require => undef
}

View File

@ -0,0 +1,45 @@
# Reads this resource's inputs from hiera and configures the Neutron DHCP
# agent; also pins the base neutron package to the same ensure state.
$resource = hiera($::resource_name)
# NOTE(review): $ip is not used in this manifest — presumably consumed by
# the transport layer; confirm before removing.
$ip = $resource['input']['ip']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$debug = $resource['input']['debug']['value']
$state_path = $resource['input']['state_path']['value']
$resync_interval = $resource['input']['resync_interval']['value']
$interface_driver = $resource['input']['interface_driver']['value']
$dhcp_driver = $resource['input']['dhcp_driver']['value']
$root_helper = $resource['input']['root_helper']['value']
$use_namespaces = $resource['input']['use_namespaces']['value']
$dnsmasq_config_file = $resource['input']['dnsmasq_config_file']['value']
$dhcp_delete_namespaces = $resource['input']['dhcp_delete_namespaces']['value']
$enable_isolated_metadata = $resource['input']['enable_isolated_metadata']['value']
$enable_metadata_network = $resource['input']['enable_metadata_network']['value']
class { 'neutron::agents::dhcp':
  enabled => true,
  manage_service => true,
  package_ensure => $package_ensure,
  debug => $debug,
  state_path => $state_path,
  resync_interval => $resync_interval,
  interface_driver => $interface_driver,
  dhcp_driver => $dhcp_driver,
  root_helper => $root_helper,
  use_namespaces => $use_namespaces,
  dnsmasq_config_file => $dnsmasq_config_file,
  dhcp_delete_namespaces => $dhcp_delete_namespaces,
  enable_isolated_metadata => $enable_isolated_metadata,
  enable_metadata_network => $enable_metadata_network,
}
include neutron::params
package { 'neutron':
  ensure => $package_ensure,
  name => $::neutron::params::package_name,
}
# Remove external class dependency
Service <| title == 'neutron-dhcp-service' |> {
  require => undef
}

View File

@ -0,0 +1,57 @@
# Solar resource definition for the Neutron DHCP agent (puppet handler,
# openstack/puppet-neutron pinned under 'git').  'str!' marks required inputs.
handler: puppet
id: 'neutron_agents_dhcp_puppet'
input:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:
  package_ensure:
    schema: str
    value: present
  debug:
    schema: bool
    value: false
  state_path:
    schema: str
    value: '/var/lib/neutron'
  resync_interval:
    schema: int
    value: 30
  interface_driver:
    schema: str
    value: 'neutron.agent.linux.interface.OVSInterfaceDriver'
  dhcp_driver:
    schema: str
    value: 'neutron.agent.linux.dhcp.Dnsmasq'
  root_helper:
    schema: str
    value: 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf'
  use_namespaces:
    schema: bool
    value: true
  dnsmasq_config_file:
    schema: str
    value:
  dhcp_delete_namespaces:
    schema: bool
    value: false
  enable_isolated_metadata:
    schema: bool
    value: false
  enable_metadata_network:
    schema: bool
    value: false
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'}
puppet_module: 'neutron'
tags: [resource/neutron, resource/neutron_agents_dhcp]
version: 1.0.0

View File

@ -0,0 +1,100 @@
# Neutron L3 agent puppet resource
Installs and configures the Neutron L3 service
TODO: create ability to have multiple L3 services
# Parameters
https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/agents/l3.pp
``package_ensure``
(optional) The state of the package
Defaults to present
``debug``
(optional) Print debug info in logs
Defaults to false
``external_network_bridge``
(optional) The name of the external bridge
Defaults to br-ex
``use_namespaces``
(optional) Enable overlapping IPs / network namespaces
Defaults to false
``interface_driver``
(optional) Driver to interface with neutron
Defaults to OVSInterfaceDriver
``router_id``
(optional) The ID of the external router in neutron
Defaults to blank
``gateway_external_network_id``
(optional) The ID of the external network in neutron
Defaults to blank
``handle_internal_only_routers``
(optional) L3 Agent will handle non-external routers
Defaults to true
``metadata_port``
(optional) The port of the metadata server
Defaults to 9697
``send_arp_for_ha``
(optional) Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
to disable this feature.
Defaults to 3
``periodic_interval``
(optional) seconds between re-sync routers' data if needed
Defaults to 40
``periodic_fuzzy_delay``
(optional) seconds to start to sync routers' data after starting agent
Defaults to 5
``enable_metadata_proxy``
(optional) can be set to False if the Nova metadata server is not available
Defaults to True
``network_device_mtu``
(optional) The MTU size for the interfaces managed by the L3 agent
Defaults to undef
Should be deprecated in the next major release in favor of a global parameter
``router_delete_namespaces``
(optional) namespaces can be deleted cleanly on the host running the L3 agent
Defaults to False
``ha_enabled``
(optional) Enabled or not HA for L3 agent.
Defaults to false
``ha_vrrp_auth_type``
(optional) VRRP authentication type. Can be AH or PASS.
Defaults to "PASS"
``ha_vrrp_auth_password``
(optional) VRRP authentication password. Required if ha_enabled = true.
Defaults to undef
``ha_vrrp_advert_int``
(optional) The advertisement interval in seconds.
Defaults to '2'
``agent_mode``
(optional) The working mode for the agent.
'legacy': default behavior (without DVR)
'dvr': enable DVR for an L3 agent running on compute node (DVR in production)
'dvr_snat': enable DVR with centralized SNAT support (DVR for single-host, for testing only)
Defaults to 'legacy'
``allow_automatic_l3agent_failover``
(optional) Automatically reschedule routers from offline L3 agents to online
L3 agents.
This is another way to run virtual routers in highly available way but with slow
failover performances compared to Keepalived feature in Neutron L3 Agent.
Defaults to 'False'

View File

@ -0,0 +1,16 @@
# Disables the Neutron L3 agent and removes its package, along with the
# base neutron package.
class { 'neutron::agents::l3':
  package_ensure => 'absent',
  enabled => false,
}
include neutron::params
package { 'neutron':
  ensure => 'absent',
  name => $::neutron::params::package_name,
}
# Remove external class dependency (the service is gone, so the collector
# must not require it anymore).
Service <| title == 'neutron-l3' |> {
  require => undef
}

View File

@ -0,0 +1,63 @@
# Reads this resource's inputs from hiera and configures the Neutron L3
# agent; also pins the base neutron package to the same ensure state.
$resource = hiera($::resource_name)
# NOTE(review): $ip is not used in this manifest — presumably consumed by
# the transport layer; confirm before removing.
$ip = $resource['input']['ip']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$debug = $resource['input']['debug']['value']
$external_network_bridge = $resource['input']['external_network_bridge']['value']
$use_namespaces = $resource['input']['use_namespaces']['value']
$interface_driver = $resource['input']['interface_driver']['value']
$router_id = $resource['input']['router_id']['value']
$gateway_external_network_id = $resource['input']['gateway_external_network_id']['value']
$handle_internal_only_routers = $resource['input']['handle_internal_only_routers']['value']
$metadata_port = $resource['input']['metadata_port']['value']
$send_arp_for_ha = $resource['input']['send_arp_for_ha']['value']
$periodic_interval = $resource['input']['periodic_interval']['value']
$periodic_fuzzy_delay = $resource['input']['periodic_fuzzy_delay']['value']
$enable_metadata_proxy = $resource['input']['enable_metadata_proxy']['value']
$network_device_mtu = $resource['input']['network_device_mtu']['value']
$router_delete_namespaces = $resource['input']['router_delete_namespaces']['value']
$ha_enabled = $resource['input']['ha_enabled']['value']
$ha_vrrp_auth_type = $resource['input']['ha_vrrp_auth_type']['value']
$ha_vrrp_auth_password = $resource['input']['ha_vrrp_auth_password']['value']
$ha_vrrp_advert_int = $resource['input']['ha_vrrp_advert_int']['value']
$agent_mode = $resource['input']['agent_mode']['value']
$allow_automatic_l3agent_failover = $resource['input']['allow_automatic_l3agent_failover']['value']
class { 'neutron::agents::l3':
  enabled => true,
  manage_service => true,
  package_ensure => $package_ensure,
  debug => $debug,
  external_network_bridge => $external_network_bridge,
  use_namespaces => $use_namespaces,
  interface_driver => $interface_driver,
  router_id => $router_id,
  gateway_external_network_id => $gateway_external_network_id,
  handle_internal_only_routers => $handle_internal_only_routers,
  metadata_port => $metadata_port,
  send_arp_for_ha => $send_arp_for_ha,
  periodic_interval => $periodic_interval,
  periodic_fuzzy_delay => $periodic_fuzzy_delay,
  enable_metadata_proxy => $enable_metadata_proxy,
  network_device_mtu => $network_device_mtu,
  router_delete_namespaces => $router_delete_namespaces,
  ha_enabled => $ha_enabled,
  ha_vrrp_auth_type => $ha_vrrp_auth_type,
  ha_vrrp_auth_password => $ha_vrrp_auth_password,
  ha_vrrp_advert_int => $ha_vrrp_advert_int,
  agent_mode => $agent_mode,
  allow_automatic_l3agent_failover => $allow_automatic_l3agent_failover,
}
include neutron::params
package { 'neutron':
  ensure => $package_ensure,
  name => $::neutron::params::package_name,
}
# Remove external class dependency
Service <| title == 'neutron-l3' |> {
  require => undef
}

View File

@ -0,0 +1,84 @@
# Solar resource definition for the Neutron L3 agent (puppet handler,
# openstack/puppet-neutron pinned under 'git').  'str!' marks required inputs.
handler: puppet
id: 'neutron_agents_l3_puppet'
input:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:
  package_ensure:
    schema: str
    value: 'present'
  debug:
    schema: bool
    value: false
  external_network_bridge:
    schema: str
    value: 'br-ex'
  use_namespaces:
    schema: bool
    value: true
  interface_driver:
    schema: str
    value: 'neutron.agent.linux.interface.OVSInterfaceDriver'
  router_id:
    schema: str
    value:
  gateway_external_network_id:
    schema: str
    value:
  handle_internal_only_routers:
    schema: bool
    value: true
  metadata_port:
    schema: int
    value: 9697
  send_arp_for_ha:
    schema: int
    value: 3
  periodic_interval:
    schema: int
    value: 40
  periodic_fuzzy_delay:
    schema: int
    value: 5
  enable_metadata_proxy:
    schema: bool
    value: true
  network_device_mtu:
    schema: str
    value:
  router_delete_namespaces:
    schema: bool
    value: false
  ha_enabled:
    schema: bool
    value: false
  ha_vrrp_auth_type:
    schema: str
    value: 'PASS'
  ha_vrrp_auth_password:
    schema: str
    value:
  ha_vrrp_advert_int:
    schema: int
    # NOTE(review): the module's documented default is 2 — confirm that the
    # value 3 here is intentional.
    value: 3
  agent_mode:
    schema: str
    value: 'legacy'
  allow_automatic_l3agent_failover:
    schema: bool
    value: false
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'}
puppet_module: 'neutron'
tags: [resource/neutron, resource/neutron_agents_l3]
version: 1.0.0

View File

@ -0,0 +1,65 @@
# Neutron DHCP agent puppet resource
Setup and configure Neutron metadata agent
# Parameters
https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/agents/metadata.pp
``auth_password``
(required) The password for the administrative user.
``shared_secret``
(required) Shared secret to validate proxies Neutron metadata requests.
``package_ensure``
Ensure state of the package. Defaults to 'present'.
``debug``
Debug. Defaults to false.
``auth_tenant``
The administrative user's tenant name. Defaults to 'services'.
``auth_user``
The administrative user name for OpenStack Networking.
Defaults to 'neutron'.
``auth_url``
The URL used to validate tokens. Defaults to 'http://localhost:35357/v2.0'.
Note, for this resource it is decomposed to auth_host and auth_port
due to implementation restrictions
``auth_insecure``
turn off verification of the certificate for ssl (Defaults to false)
``auth_ca_cert``
CA cert to check against with for ssl keystone. (Defaults to undef)
``auth_region``
The authentication region. Defaults to 'RegionOne'.
``metadata_ip``
The IP address of the metadata service. Defaults to '127.0.0.1'.
``metadata_port``
The TCP port of the metadata service. Defaults to 8775.
``metadata_workers``
(optional) Number of separate worker processes to spawn.
The default, count of machine's processors, runs the worker thread in the
current process.
Greater than 0 launches that number of child processes as workers.
The parent process manages them. Having more workers will help to improve performances.
Defaults to: $::processorcount
``metadata_backlog``
(optional) Number of backlog requests to configure the metadata server socket with.
Defaults to 4096
``metadata_memory_cache_ttl``
(optional) Specifies time in seconds a metadata cache entry is valid in
memory caching backend.
Set to 0 will cause cache entries to never expire.
Set to undef or false to disable cache.
Defaults to 5

View File

@ -0,0 +1,16 @@
# Disables the Neutron metadata agent and removes its package, along with
# the base neutron package.
class { 'neutron::agents::metadata':
  package_ensure => 'absent',
  enabled => false,
}
include neutron::params
package { 'neutron':
  ensure => 'absent',
  name => $::neutron::params::package_name,
}
# Remove external class dependency (the service is gone, so the collector
# must not require it anymore).
Service <| title == 'neutron-metadata' |> {
  require => undef
}

View File

@ -0,0 +1,53 @@
# Reads this resource's inputs from hiera and configures the Neutron
# metadata agent; also pins the base neutron package to the same ensure state.
$resource = hiera($::resource_name)
# NOTE(review): $ip is not used in this manifest — presumably consumed by
# the transport layer; confirm before removing.
$ip = $resource['input']['ip']['value']
$auth_host = $resource['input']['auth_host']['value']
$auth_port = $resource['input']['auth_port']['value']
$auth_password = $resource['input']['auth_password']['value']
$shared_secret = $resource['input']['shared_secret']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$debug = $resource['input']['debug']['value']
$auth_tenant = $resource['input']['auth_tenant']['value']
$auth_user = $resource['input']['auth_user']['value']
$auth_insecure = $resource['input']['auth_insecure']['value']
$auth_ca_cert = $resource['input']['auth_ca_cert']['value']
$auth_region = $resource['input']['auth_region']['value']
$metadata_ip = $resource['input']['metadata_ip']['value']
$metadata_port = $resource['input']['metadata_port']['value']
$metadata_workers = $resource['input']['metadata_workers']['value']
$metadata_backlog = $resource['input']['metadata_backlog']['value']
$metadata_memory_cache_ttl = $resource['input']['metadata_memory_cache_ttl']['value']
class { 'neutron::agents::metadata':
  enabled => true,
  manage_service => true,
  auth_password => $auth_password,
  shared_secret => $shared_secret,
  package_ensure => $package_ensure,
  debug => $debug,
  auth_tenant => $auth_tenant,
  auth_user => $auth_user,
  # auth_url is recomposed from the auth_host/auth_port inputs (the
  # resource exposes them separately — see the README).
  auth_url => "http://${auth_host}:${auth_port}/v2.0",
  auth_insecure => $auth_insecure,
  auth_ca_cert => $auth_ca_cert,
  auth_region => $auth_region,
  metadata_ip => $metadata_ip,
  metadata_port => $metadata_port,
  metadata_workers => $metadata_workers,
  metadata_backlog => $metadata_backlog,
  metadata_memory_cache_ttl => $metadata_memory_cache_ttl,
}
include neutron::params
package { 'neutron':
  ensure => $package_ensure,
  name => $::neutron::params::package_name,
}
# Remove external class dependency
Service <| title == 'neutron-metadata' |> {
  require => undef
}

View File

@ -0,0 +1,70 @@
# Solar resource definition for the Neutron metadata agent (puppet handler,
# openstack/puppet-neutron pinned under 'git').  'str!' marks required inputs.
# auth_host/auth_port are exposed separately and recombined into auth_url
# by actions/run.pp.
handler: puppet
id: 'neutron_agents_metadata_puppet'
input:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:
  auth_password:
    schema: str!
    value:
  shared_secret:
    schema: str!
    value:
  package_ensure:
    schema: str
    value: 'present'
  debug:
    schema: bool
    value: false
  auth_tenant:
    schema: str
    value: 'services'
  auth_user:
    schema: str
    value: 'neutron'
  auth_insecure:
    schema: bool
    value: false
  auth_ca_cert:
    schema: str
    value:
  auth_region:
    schema: str
    value: 'RegionOne'
  metadata_ip:
    schema: str
    value: '127.0.0.1'
  metadata_port:
    schema: int
    value: 8775
  metadata_workers:
    schema: int
    # NOTE(review): upstream defaults to the processor count; 1 is a
    # conservative fixed value — confirm it is intentional.
    value: 1
  metadata_backlog:
    schema: int
    value: 4096
  metadata_memory_cache_ttl:
    schema: int
    value: 5
  auth_host:
    schema: str
    value: 'localhost'
  auth_port:
    schema: int
    value: 35357
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'}
puppet_module: 'neutron'
tags: [resource/neutron, resource/neutron_agents_metadata]
version: 1.0.0

View File

@ -0,0 +1,74 @@
# Neutron OVS agent with ML2 plugin puppet resource
Setups OVS neutron agent when using ML2 plugin
# === Parameters
source https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/agents/ml2/ovs.pp
``package_ensure``
(optional) The state of the package
Defaults to 'present'
``enabled``
(required) Whether or not to enable the OVS Agent
Defaults to true
``bridge_uplinks``
(optional) List of interfaces to connect to the bridge when doing
bridge mapping.
Defaults to empty list
``bridge_mapping``
(optional) List of <physical_network>:<bridge>
Defaults to empty list
``integration_bridge``
(optional) Integration bridge in OVS
Defaults to 'br-int'
``enable_tunneling``
(optional) Enable or not tunneling
Defaults to false
``tunnel_types``
(optional) List of types of tunnels to use when utilizing tunnels,
either 'gre' or 'vxlan'.
Defaults to false
``local_ip``
(optional) Local IP address of GRE tunnel endpoints.
Required when enabling tunneling
Defaults to false
``tunnel_bridge``
(optional) Bridge used to transport tunnels
Defaults to 'br-tun'
``vxlan_udp_port``
(optional) The UDP port to use for VXLAN tunnels.
Defaults to '4789'
``polling_interval``
(optional) The number of seconds the agent will wait between
polling for local device changes.
Defaults to '2'
``l2_population``
(optional) Extension to use alongside ml2 plugin's l2population
mechanism driver.
Defaults to false
``arp_responder``
(optional) Enable or not the ARP responder.
Recommended when using the l2 population mechanism driver.
Defaults to false
``firewall_driver``
(optional) Firewall driver for realizing neutron security group function.
Defaults to 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver'.
``enable_distributed_routing``
(optional) Set to True on L2 agents to enable support
for distributed virtual routing.
Defaults to false

View File

@ -0,0 +1,4 @@
# Disables and removes the Neutron OVS (ML2) agent.
class { 'neutron::agents::ml2::ovs':
  package_ensure => 'absent',
  enabled => false,
}

View File

@ -0,0 +1,45 @@
# Reads this resource's inputs from hiera and configures the Neutron OVS
# agent for the ML2 plugin.
$resource = hiera($::resource_name)
# NOTE(review): $ip is not used in this manifest — presumably consumed by
# the transport layer; confirm before removing.
$ip = $resource['input']['ip']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$enabled = $resource['input']['enabled']['value']
$bridge_uplinks = $resource['input']['bridge_uplinks']['value']
$bridge_mappings = $resource['input']['bridge_mappings']['value']
$integration_bridge = $resource['input']['integration_bridge']['value']
$enable_tunneling = $resource['input']['enable_tunneling']['value']
$tunnel_types = $resource['input']['tunnel_types']['value']
$local_ip = $resource['input']['local_ip']['value']
$tunnel_bridge = $resource['input']['tunnel_bridge']['value']
$vxlan_udp_port = $resource['input']['vxlan_udp_port']['value']
$polling_interval = $resource['input']['polling_interval']['value']
$l2_population = $resource['input']['l2_population']['value']
$arp_responder = $resource['input']['arp_responder']['value']
$firewall_driver = $resource['input']['firewall_driver']['value']
$enable_distributed_routing = $resource['input']['enable_distributed_routing']['value']
class { 'neutron::agents::ml2::ovs':
  # Honor the 'enabled' input instead of hard-coding true ($enabled was
  # previously read but never used).  The input defaults to true, so the
  # default behavior is unchanged.
  enabled => $enabled,
  package_ensure => $package_ensure,
  bridge_uplinks => $bridge_uplinks,
  bridge_mappings => $bridge_mappings,
  integration_bridge => $integration_bridge,
  enable_tunneling => $enable_tunneling,
  tunnel_types => $tunnel_types,
  local_ip => $local_ip,
  tunnel_bridge => $tunnel_bridge,
  vxlan_udp_port => $vxlan_udp_port,
  polling_interval => $polling_interval,
  l2_population => $l2_population,
  arp_responder => $arp_responder,
  firewall_driver => $firewall_driver,
  enable_distributed_routing => $enable_distributed_routing,
}
# Remove external class dependency and restore required ones
Service <| title == 'neutron-ovs-agent-service' |> {
  require => undef
}
Neutron_plugin_ml2<||> ~> Service['neutron-ovs-agent-service']
File <| title == '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini' |> ~>
Service<| title == 'neutron-ovs-agent-service' |>

View File

@ -0,0 +1,66 @@
# Solar resource definition for the Neutron OVS agent with the ML2 plugin
# (puppet handler, openstack/puppet-neutron pinned under 'git').
handler: puppet
id: 'neutron_agents_ml2_ovs_puppet'
input:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:
  package_ensure:
    schema: str
    value: 'present'
  enabled:
    schema: bool
    value: true
  bridge_uplinks:
    schema: [str]
    value: []
  bridge_mappings:
    schema: [str]
    value: []
  integration_bridge:
    schema: str
    value: 'br-int'
  enable_tunneling:
    schema: bool
    value: false
  tunnel_types:
    schema: [str]
    value: []
  local_ip:
    schema: str
    value: ''
  tunnel_bridge:
    schema: str
    value: 'br-tun'
  vxlan_udp_port:
    schema: int
    value: 4789
  polling_interval:
    schema: int
    value: 2
  l2_population:
    schema: bool
    value: false
  arp_responder:
    schema: bool
    value: false
  firewall_driver:
    schema: str
    value: 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver'
  enable_distributed_routing:
    schema: bool
    value: false
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'}
puppet_module: 'neutron'
tags: [resource/neutron, resource/neutron_agents_ml2_ovs]
version: 1.0.0

View File

@ -0,0 +1,83 @@
# Neutron ML2 plugin puppet resource
# === Parameters
source https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/plugins/ml2.pp
``type_drivers``
(optional) List of network type driver entrypoints to be loaded
from the neutron.ml2.type_drivers namespace.
Could be an array that can have these elements:
local, flat, vlan, gre, vxlan
Defaults to ['local', 'flat', 'vlan', 'gre', 'vxlan'].
``tenant_network_types``
(optional) Ordered list of network_types to allocate as tenant networks.
The value 'local' is only useful for single-box testing
but provides no connectivity between hosts.
Should be an array that can have these elements:
local, flat, vlan, gre, vxlan
Defaults to ['local', 'flat', 'vlan', 'gre', 'vxlan'].
``mechanism_drivers``
(optional) An ordered list of networking mechanism driver
entrypoints to be loaded from the neutron.ml2.mechanism_drivers namespace.
Should be an array that can have these elements:
logger, test, linuxbridge, openvswitch, hyperv, ncs, arista, cisco_nexus,
l2population, sriovnicswitch
Default to ['openvswitch', 'linuxbridge'].
``flat_networks``
(optional) List of physical_network names with which flat networks
can be created. Use * to allow flat networks with arbitrary
physical_network names.
Should be an array.
Default to *.
``network_vlan_ranges``
(optional) List of <physical_network>:<vlan_min>:<vlan_max> or
<physical_network> specifying physical_network names
usable for VLAN provider and tenant networks, as
well as ranges of VLAN tags on each available for
allocation to tenant networks.
Should be an array with vlan_min = 1 & vlan_max = 4094 (IEEE 802.1Q)
Default to empty.
``tunnel_id_ranges``
(optional) Comma-separated list of <tun_min>:<tun_max> tuples
enumerating ranges of GRE tunnel IDs that are
available for tenant network allocation
Should be an array with tun_max +1 - tun_min > 1000000
Default to empty.
``vxlan_group``
(optional) Multicast group for VXLAN.
Multicast group for VXLAN. When configured, VXLAN broadcast traffic is
sent to this multicast group; when left unconfigured, multicast VXLAN
mode is disabled.
Should be an Multicast IP (v4 or v6) address.
Default to 'None'.
``vni_ranges``
(optional) Comma-separated list of <vni_min>:<vni_max> tuples
enumerating ranges of VXLAN VNI IDs that are
available for tenant network allocation.
Min value is 0 and Max value is 16777215.
Default to empty.
``enable_security_group``
(optional) Controls if neutron security group is enabled or not.
It should be false when you use nova security group.
Defaults to true.
``supported_pci_vendor_devs``
(optional) Supported PCI vendor devices, defined by
vendor_id:product_id according to the PCI ID
Repository. Should be an array of devices.
Defaults to ['15b3:1004', '8086:10ca'] (Intel & Mellanox SR-IOV capable NICs)
``sriov_agent_required``
(optional) SRIOV neutron agent is required for port binding.
Only set to true if SRIOV network adapters support VF link state setting
and if admin state management is desired.
Defaults to false.

View File

@ -0,0 +1,3 @@
# Removes the Neutron ML2 plugin package.
class { 'neutron::plugins::ml2':
  package_ensure => 'absent',
}

View File

@ -0,0 +1,54 @@
# Solar 'run' action: install and configure the Neutron ML2 core plugin.
# All inputs are read from hiera under this resource's name; each
# $...['value'] below maps one declared input (see the resource meta.yaml)
# straight onto the upstream neutron::plugins::ml2 class.
$resource = hiera($::resource_name)
$ip = $resource['input']['ip']['value']
$type_drivers = $resource['input']['type_drivers']['value']
$tenant_network_types = $resource['input']['tenant_network_types']['value']
$mechanism_drivers = $resource['input']['mechanism_drivers']['value']
$flat_networks = $resource['input']['flat_networks']['value']
$network_vlan_ranges = $resource['input']['network_vlan_ranges']['value']
$tunnel_id_ranges = $resource['input']['tunnel_id_ranges']['value']
$vxlan_group = $resource['input']['vxlan_group']['value']
$vni_ranges = $resource['input']['vni_ranges']['value']
$enable_security_group = $resource['input']['enable_security_group']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$supported_pci_vendor_devs = $resource['input']['supported_pci_vendor_devs']['value']
$sriov_agent_required = $resource['input']['sriov_agent_required']['value']
# LP1490438
# NOTE(review): the empty defaults file is managed first as a workaround —
# presumably for Launchpad bug 1490438 referenced above; confirm.
file {'/etc/default/neutron-server':
ensure => present,
owner => 'root',
group => 'root',
# NOTE(review): unquoted numeric mode; puppet style guide recommends '0644'
mode => 644
} ->
# Configure the ML2 plugin only after the defaults file exists.
class { 'neutron::plugins::ml2':
type_drivers => $type_drivers,
tenant_network_types => $tenant_network_types,
mechanism_drivers => $mechanism_drivers,
flat_networks => $flat_networks,
network_vlan_ranges => $network_vlan_ranges,
tunnel_id_ranges => $tunnel_id_ranges,
vxlan_group => $vxlan_group,
vni_ranges => $vni_ranges,
enable_security_group => $enable_security_group,
package_ensure => $package_ensure,
supported_pci_vendor_devs => $supported_pci_vendor_devs,
sriov_agent_required => $sriov_agent_required,
} ->
# Stamp the DB migration head, but only when no revision is recorded yet
# (onlyif matches alembic's "Current revision ...: None" output).
exec { 'neutron-db-sync':
provider => 'shell',
# NOTE(review): $command is never assigned in this manifest, so this would
# expand to " stamp head". Presumably a neutron-db-manage invocation is
# expected to be defined; confirm where it comes from.
command => "${command} stamp head",
path => [ '/usr/bin', '/bin' ],
onlyif => "${command} current | grep -qE '^Current revision.*None$' "
}
include neutron::params
# Ensure the base neutron package (distro-specific name from
# neutron::params) is in place before the DB stamp runs.
package { 'neutron':
ensure => $package_ensure,
name => $::neutron::params::package_name,
before => Exec['neutron-db-sync']
}

View File

@ -0,0 +1,57 @@
# Solar resource metadata for the Neutron ML2 plugin.
# Each input below surfaces as $resource['input'][<name>]['value'] in the
# puppet action manifests; a trailing '!' in a schema marks a required value.
handler: puppet
id: 'neutron_plugins_ml2_puppet'
input:
ip:
schema: str!
value:
ssh_key:
schema: str!
value:
ssh_user:
schema: str!
value:
type_drivers:
schema: [str]
value: ['local', 'flat', 'vlan', 'gre', 'vxlan']
tenant_network_types:
schema: [str]
value: ['local', 'flat', 'vlan', 'gre', 'vxlan']
mechanism_drivers:
schema: [str]
value: ['openvswitch', 'linuxbridge']
flat_networks:
schema: [str]
value: ['*']
network_vlan_ranges:
schema: [str]
value: ['physnet1:1000:2999']
tunnel_id_ranges:
schema: [str]
value: ['20:100']
# Multicast group for VXLAN broadcast traffic (see resource README).
vxlan_group:
schema: str
value: '224.0.0.1'
vni_ranges:
schema: [str]
value: ['10:100']
enable_security_group:
schema: bool
value: true
package_ensure:
schema: str
value: 'present'
supported_pci_vendor_devs:
schema: [str]
value: ['15b3:1004', '8086:10ca']
sriov_agent_required:
schema: bool
value: false
# Upstream puppet module, pinned to the 5.1.0 (Juno) tag.
git:
schema: {repository: str!, branch: str!}
value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'}
puppet_module: 'neutron'
tags: [resource/neutron, resource/neutron_plugins_ml2]
version: 1.0.0

View File

@ -0,0 +1,194 @@
# Neutron puppet resource
Installs the neutron package and configures
/etc/neutron/neutron.conf for SSL, AMQP, logging, service plugins and other stuff.
Does not produce any services.
# Parameters:
source https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/init.pp
``package_ensure``
(optional) The state of the package
Defaults to 'present'
``verbose``
(optional) Verbose logging
Defaults to False
``debug``
(optional) Print debug messages in the logs
Defaults to False
``bind_host``
(optional) The IP/interface to bind to
Defaults to 0.0.0.0 (all interfaces)
``bind_port``
(optional) The port to use
Defaults to 9696
``core_plugin``
(optional) Neutron plugin provider
Defaults to openvswitch
Could be bigswitch, brocade, cisco, embrane, hyperv, linuxbridge, midonet, ml2, mlnx, nec, nicira, plumgrid, ryu
``service_plugins``
(optional) Advanced service modules.
Could be an array that can have these elements:
router, firewall, lbaas, vpnaas, metering
Defaults to empty
``auth_strategy``
(optional) How to authenticate
Defaults to 'keystone'. 'noauth' is the only other valid option
``base_mac``
(optional) The MAC address pattern to use.
Defaults to fa:16:3e:00:00:00
``mac_generation_retries``
(optional) How many times to try to generate a unique mac
Defaults to 16
``dhcp_lease_duration``
(optional) DHCP lease
Defaults to 86400 seconds
``dhcp_agents_per_network``
(optional) Number of DHCP agents scheduled to host a network.
This enables redundant DHCP agents for configured networks.
Defaults to 1
``network_device_mtu``
(optional) The MTU size for the interfaces managed by neutron
Defaults to undef
``dhcp_agent_notification``
(optional) Allow sending resource operation notification to DHCP agent.
Defaults to true
``allow_bulk``
(optional) Enable bulk crud operations
Defaults to true
``allow_pagination``
(optional) Enable pagination
Defaults to false
``allow_sorting``
(optional) Enable sorting
Defaults to false
``allow_overlapping_ips``
(optional) Enables network namespaces
Defaults to false
``api_extensions_path``
(optional) Specify additional paths for API extensions that the
module in use needs to load.
Defaults to undef
``report_interval``
(optional) Seconds between nodes reporting state to server; should be less than
agent_down_time, best if it is half or less than agent_down_time.
agent_down_time is a config for neutron-server, set by class neutron::server
report_interval is a config for neutron agents, set by class neutron
Defaults to: 30
``control_exchange``
(optional) What RPC queue/exchange to use
Defaults to neutron
``rpc_backend``
(optional) what rpc/queuing service to use
Defaults to impl_kombu (rabbitmq)
``rabbit_password``
``rabbit_host``
``rabbit_port``
``rabbit_user``
(optional) Various rabbitmq settings
``rabbit_hosts``
(optional) array of rabbitmq servers for HA.
A single IP address, such as a VIP, can be used for load-balancing
multiple RabbitMQ Brokers.
Defaults to false
``rabbit_use_ssl``
(optional) Connect over SSL for RabbitMQ
Defaults to false
``kombu_ssl_ca_certs``
(optional) SSL certification authority file (valid only if SSL enabled).
Defaults to undef
``kombu_ssl_certfile``
(optional) SSL cert file (valid only if SSL enabled).
Defaults to undef
``kombu_ssl_keyfile``
(optional) SSL key file (valid only if SSL enabled).
Defaults to undef
``kombu_ssl_version``
(optional) SSL version to use (valid only if SSL enabled).
Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be
available on some distributions.
Defaults to 'TLSv1'
``kombu_reconnect_delay``
(optional) The amount of time to wait before attempting to reconnect
to MQ provider. This is used in some cases where you may need to wait
for the provider to properly promote the master before attempting to
reconnect. See https://review.openstack.org/#/c/76686
Defaults to '1.0'
``qpid_hostname``
``qpid_port``
``qpid_username``
``qpid_password``
``qpid_heartbeat``
``qpid_protocol``
``qpid_tcp_nodelay``
``qpid_reconnect``
``qpid_reconnect_timeout``
``qpid_reconnect_limit``
``qpid_reconnect_interval``
``qpid_reconnect_interval_min``
``qpid_reconnect_interval_max``
(optional) various QPID options
``use_ssl``
(optional) Enable SSL on the API server
Defaults to false, not set
``cert_file``
(optional) certificate file to use when starting api server securely
defaults to false, not set
``key_file``
(optional) Private key file to use when starting API server securely
Defaults to false, not set
``ca_file``
(optional) CA certificate file to use to verify connecting clients
Defaults to false, not set
``use_syslog``
(optional) Use syslog for logging
Defaults to false
``log_facility``
(optional) Syslog facility to receive log lines
Defaults to LOG_USER
``log_file``
(optional) Where to log
Defaults to false
``log_dir``
(optional) Directory where logs should be stored
If set to boolean false, it will not log to any directory
Defaults to /var/log/neutron

View File

@ -1,22 +1,5 @@
$resource = hiera('{{ resource_name }}')
$rabbitmq_user = $resource['input']['rabbitmq_user']['value']
$rabbitmq_password = $resource['input']['rabbitmq_password']['value']
$rabbitmq_host = $resource['input']['rabbitmq_host']['value']
$rabbitmq_port = $resource['input']['rabbitmq_port']['value']
class { 'neutron::server':
enabled => false,
package_ensure => 'absent',
auth_type => 'noauth'
}
class { 'neutron':
enabled => false,
package_ensure => 'absent',
rabbit_user => $rabbitmq_user,
rabbit_password => $rabbitmq_password,
rabbit_host => $rabbitmq_host,
rabbit_port => $rabbitmq_port
}
enabled => false,
package_ensure => 'absent',
rabbit_password => 'not important as removed',
}

View File

@ -2,45 +2,119 @@ $resource = hiera($::resource_name)
$ip = $resource['input']['ip']['value']
$rabbitmq_user = $resource['input']['rabbitmq_user']['value']
$rabbitmq_password = $resource['input']['rabbitmq_password']['value']
$rabbitmq_host = $resource['input']['rabbitmq_host']['value']
$rabbitmq_port = $resource['input']['rabbitmq_port']['value']
$rabbitmq_virtual_host = $resource['input']['rabbitmq_virtual_host']['value']
$keystone_host = $resource['input']['keystone_host']['value']
$keystone_port = $resource['input']['keystone_port']['value']
$keystone_user = $resource['input']['keystone_user']['value']
$keystone_password = $resource['input']['keystone_password']['value']
$keystone_tenant = $resource['input']['keystone_tenant']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$verbose = $resource['input']['verbose']['value']
$debug = $resource['input']['debug']['value']
$bind_host = $resource['input']['bind_host']['value']
$bind_port = $resource['input']['bind_port']['value']
$core_plugin = $resource['input']['core_plugin']['value']
$service_plugins = $resource['input']['service_plugins']['value']
$auth_strategy = $resource['input']['auth_strategy']['value']
$base_mac = $resource['input']['base_mac']['value']
$mac_generation_retries = $resource['input']['mac_generation_retries']['value']
$dhcp_lease_duration = $resource['input']['dhcp_lease_duration']['value']
$dhcp_agents_per_network = $resource['input']['dhcp_agents_per_network']['value']
$network_device_mtu = $resource['input']['network_device_mtu']['value']
$dhcp_agent_notification = $resource['input']['dhcp_agent_notification']['value']
$allow_bulk = $resource['input']['allow_bulk']['value']
$allow_pagination = $resource['input']['allow_pagination']['value']
$allow_sorting = $resource['input']['allow_sorting']['value']
$allow_overlapping_ips = $resource['input']['allow_overlapping_ips']['value']
$api_extensions_path = $resource['input']['api_extensions_path']['value']
$root_helper = $resource['input']['root_helper']['value']
$report_interval = $resource['input']['report_interval']['value']
$control_exchange = $resource['input']['control_exchange']['value']
$rpc_backend = $resource['input']['rpc_backend']['value']
$rabbit_password = $resource['input']['rabbit_password']['value']
$rabbit_host = $resource['input']['rabbit_host']['value']
$rabbit_hosts = $resource['input']['rabbit_hosts']['value']
$rabbit_port = $resource['input']['rabbit_port']['value']
$rabbit_user = $resource['input']['rabbit_user']['value']
$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']['value']
$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']['value']
$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']['value']
$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']['value']
$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']['value']
$kombu_ssl_version = $resource['input']['kombu_ssl_version']['value']
$kombu_reconnect_delay = $resource['input']['kombu_reconnect_delay']['value']
$qpid_hostname = $resource['input']['qpid_hostname']['value']
$qpid_port = $resource['input']['qpid_port']['value']
$qpid_username = $resource['input']['qpid_username']['value']
$qpid_password = $resource['input']['qpid_password']['value']
$qpid_heartbeat = $resource['input']['qpid_heartbeat']['value']
$qpid_protocol = $resource['input']['qpid_protocol']['value']
$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']['value']
$qpid_reconnect = $resource['input']['qpid_reconnect']['value']
$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout']['value']
$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit']['value']
$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min']['value']
$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max']['value']
$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval']['value']
$use_ssl = $resource['input']['use_ssl']['value']
$cert_file = $resource['input']['cert_file']['value']
$key_file = $resource['input']['key_file']['value']
$ca_file = $resource['input']['ca_file']['value']
$use_syslog = $resource['input']['use_syslog']['value']
$log_facility = $resource['input']['log_facility']['value']
$log_file = $resource['input']['log_file']['value']
$log_dir = $resource['input']['log_dir']['value']
class { 'neutron':
debug => true,
verbose => true,
enabled => true,
package_ensure => 'present',
auth_strategy => 'keystone',
rabbit_user => $rabbitmq_user,
rabbit_password => $rabbitmq_password,
rabbit_host => $rabbitmq_host,
rabbit_port => $rabbitmq_port,
rabbit_virtual_host => $rabbitmq_virtual_host,
service_plugins => ['metering']
enabled => true,
package_ensure => $package_ensure,
verbose => $verbose,
debug => $debug,
bind_host => $bind_host,
bind_port => $bind_port,
core_plugin => $core_plugin,
service_plugins => $service_plugins,
auth_strategy => $auth_strategy,
base_mac => $base_mac,
mac_generation_retries => $mac_generation_retries,
dhcp_lease_duration => $dhcp_lease_duration,
dhcp_agents_per_network => $dhcp_agents_per_network,
network_device_mtu => $network_device_mtu,
dhcp_agent_notification => $dhcp_agent_notification,
allow_bulk => $allow_bulk,
allow_pagination => $allow_pagination,
allow_sorting => $allow_sorting,
allow_overlapping_ips => $allow_overlapping_ips,
api_extensions_path => $api_extensions_path,
root_helper => $root_helper,
report_interval => $report_interval,
control_exchange => $control_exchange,
rpc_backend => $rpc_backend,
rabbit_password => $rabbit_password,
rabbit_host => $rabbit_host,
rabbit_hosts => $rabbit_hosts,
rabbit_port => $rabbit_port,
rabbit_user => $rabbit_user,
rabbit_virtual_host => $rabbit_virtual_host,
rabbit_use_ssl => $rabbit_use_ssl,
kombu_ssl_ca_certs => $kombu_ssl_ca_certs,
kombu_ssl_certfile => $kombu_ssl_certfile,
kombu_ssl_keyfile => $kombu_ssl_keyfile,
kombu_ssl_version => $kombu_ssl_version,
kombu_reconnect_delay => $kombu_reconnect_delay,
qpid_hostname => $qpid_hostname,
qpid_port => $qpid_port,
qpid_username => $qpid_username,
qpid_password => $qpid_password,
qpid_heartbeat => $qpid_heartbeat,
qpid_protocol => $qpid_protocol,
qpid_tcp_nodelay => $qpid_tcp_nodelay,
qpid_reconnect => $qpid_reconnect,
qpid_reconnect_timeout => $qpid_reconnect_timeout,
qpid_reconnect_limit => $qpid_reconnect_limit,
qpid_reconnect_interval_min => $qpid_reconnect_interval_min,
qpid_reconnect_interval_max => $qpid_reconnect_interval_max,
qpid_reconnect_interval => $qpid_reconnect_interval,
use_ssl => $use_ssl,
cert_file => $cert_file,
key_file => $key_file,
ca_file => $ca_file,
use_syslog => $use_syslog,
log_facility => $log_facility,
log_file => $log_file,
log_dir => $log_dir,
}
class { 'neutron::server':
enabled => true,
package_ensure => 'present',
auth_type => 'keystone',
auth_password => $keystone_password,
auth_user => $keystone_user,
auth_tenant => $keystone_tenant
}
class { 'neutron::agents::dhcp': }
#file { '/etc/neutron/neutron-exports':
# owner => 'root',
# group => 'root',
# content => template('neutron/exports.erb')
#}

View File

@ -1,5 +1,5 @@
handler: puppet
id: 'neutron'
id: 'neutron_puppet'
input:
ip:
schema: str!
@ -11,45 +11,179 @@ input:
schema: str!
value:
# TODO: add vhost!
rabbitmq_host:
package_ensure:
schema: str
value: 'present'
verbose:
schema: bool
value: false
debug:
schema: bool
value: false
bind_host:
schema: str
value: '0.0.0.0'
bind_port:
schema: int
value: 9696
core_plugin:
schema: str
value: 'openvswitch'
service_plugins:
schema: str
value:
auth_strategy:
schema: str
value: 'keystone'
base_mac:
schema: str
value: 'fa:16:3e:00:00:00'
mac_generation_retries:
schema: int
value: 16
dhcp_lease_duration:
schema: int
value: 86400
dhcp_agents_per_network:
schema: int
value: 1
network_device_mtu:
schema: str
value:
dhcp_agent_notification:
schema: bool
value: true
allow_bulk:
schema: bool
value: true
allow_pagination:
schema: bool
value: false
allow_sorting:
schema: bool
value: false
allow_overlapping_ips:
schema: bool
value: false
api_extensions_path:
schema: str
value:
root_helper:
schema: str
value: 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf'
report_interval:
schema: int
value: 30
control_exchange:
schema: str
value: 'neutron'
rpc_backend:
schema: str
value: 'neutron.openstack.common.rpc.impl_kombu'
rabbit_password:
schema: str!
value:
rabbitmq_port:
schema: int!
rabbit_host:
schema: str
value: 'localhost'
rabbit_hosts:
schema: bool
value: false
rabbit_port:
schema: int
value: 5672
rabbit_user:
schema: str
value: 'guest'
rabbit_virtual_host:
schema: str
value: '/'
rabbit_use_ssl:
schema: bool
value: false
kombu_ssl_ca_certs:
schema: str
value:
rabbitmq_user:
kombu_ssl_certfile:
schema: str
value:
kombu_ssl_keyfile:
schema: str
value:
kombu_ssl_version:
schema: str
value: 'TLSv1'
kombu_reconnect_delay:
schema: str
value: '1.0'
qpid_hostname:
schema: str
value: 'localhost'
qpid_port:
schema: int
value: 5672
qpid_username:
schema: str
value: 'guest'
qpid_password:
schema: str!
value: 'guest'
qpid_heartbeat:
schema: int
value: 60
qpid_protocol:
schema: str
value: 'tcp'
qpid_tcp_nodelay:
schema: bool
value: true
qpid_reconnect:
schema: bool
value: true
qpid_reconnect_timeout:
schema: int
value: 0
qpid_reconnect_limit:
schema: int
value: 0
qpid_reconnect_interval_min:
schema: int
value: 0
qpid_reconnect_interval_max:
schema: int
value: 0
qpid_reconnect_interval:
schema: int
value: 0
use_ssl:
schema: bool
value: false
cert_file:
schema: str
value:
rabbitmq_password:
schema: str!
key_file:
schema: str
value:
rabbitmq_virtual_host:
schema: str!
ca_file:
schema: str
value:
use_syslog:
schema: bool
value: false
log_facility:
schema: str
value: 'LOG_USER'
log_file:
schema: str
value:
log_dir:
schema: str
value: '/var/log/neutron'
git:
schema: {repository: str!, branch: str!}
value: {repository: 'https://github.com/openstack/puppet-neutron', branch: 'stable/juno'}
value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'}
port:
schema: int!
value: 9696
keystone_host:
schema: str!
keystone_port:
schema: int!
value:
keystone_user:
schema: str!
value:
keystone_password:
schema: str!
value:
keystone_tenant:
schema: str!
value:
puppet_module: 'neutron'
tags: []
tags: [resource/neutron]
version: 1.0.0

View File

@ -0,0 +1,167 @@
# Neutron puppet resource
Setup and configure the neutron API service and endpoint
# Parameters:
source https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/server.pp
``package_ensure``
(optional) The state of the package
Defaults to present
``log_file``
REMOVED: Use log_file of neutron class instead.
``log_dir``
REMOVED: Use log_dir of neutron class instead.
``auth_password``
(optional) The password to use for authentication (keystone)
Defaults to false. Set a value unless you are using noauth
``auth_type``
(optional) What auth system to use
Defaults to 'keystone'. Can also be 'noauth'
``auth_host``
(optional) The keystone host
Defaults to localhost
``auth_protocol``
(optional) The protocol used to access the auth host
Defaults to http.
``auth_port``
(optional) The keystone auth port
Defaults to 35357
``auth_admin_prefix``
(optional) The admin_prefix used to admin endpoint of the auth host
This allows admin auth URIs like http://auth_host:35357/keystone.
(where '/keystone' is the admin prefix)
Defaults to false for empty. If defined, should be a string with a leading '/' and no trailing '/'.
``auth_tenant``
(optional) The tenant of the auth user
Defaults to services
``auth_user``
(optional) The name of the auth user
Defaults to neutron
``auth_protocol``
(optional) The protocol to connect to keystone
Defaults to http
``auth_uri``
(optional) Complete public Identity API endpoint.
Defaults to: $auth_protocol://$auth_host:5000/
``database_connection``
(optional) Connection url for the neutron database.
(Defaults to 'sqlite:////var/lib/neutron/ovs.sqlite')
Note: for this resource it is decomposed to the
'db_host', 'db_port', 'db_user', 'db_password' inputs
due to implementation limitations
``database_max_retries``
(optional) Maximum database connection retries during startup.
(Defaults to 10)
``sql_max_retries``
DEPRECATED: Use database_max_retries instead.
``max_retries``
DEPRECATED: Use database_max_retries instead.
``database_idle_timeout``
(optional) Timeout before idle database connections are reaped.
Deprecates sql_idle_timeout
(Defaults to 3600)
``sql_idle_timeout``
DEPRECATED: Use database_idle_timeout instead.
``idle_timeout``
DEPRECATED: Use database_idle_timeout instead.
``database_retry_interval``
(optional) Interval between retries of opening a database connection.
(Defaults to 10)
``sql_reconnect_interval``
DEPRECATED: Use database_retry_interval instead.
``retry_interval``
DEPRECATED: Use database_retry_interval instead.
``database_min_pool_size``
(optional) Minimum number of SQL connections to keep open in a pool.
Defaults to: 1
``database_max_pool_size``
(optional) Maximum number of SQL connections to keep open in a pool.
Defaults to: 10
``database_max_overflow``
(optional) If set, use this value for max_overflow with sqlalchemy.
Defaults to: 20
``sync_db``
(optional) Run neutron-db-manage on api nodes after installing the package.
Defaults to false
``api_workers``
(optional) Number of separate worker processes to spawn.
The default, count of machine's processors, runs the worker thread in the
current process.
Greater than 0 launches that number of child processes as workers.
The parent process manages them.
Defaults to: $::processorcount
``rpc_workers``
(optional) Number of separate RPC worker processes to spawn.
The default, count of machine's processors, runs the worker thread in the
current process.
Greater than 0 launches that number of child processes as workers.
The parent process manages them.
Defaults to: $::processorcount
``agent_down_time``
(optional) Seconds to regard the agent as down; should be at least twice
report_interval, to be sure the agent is down for good.
agent_down_time is a config for neutron-server, set by class neutron::server
report_interval is a config for neutron agents, set by class neutron
Defaults to: 75
``router_scheduler_driver``
(optional) Driver to use for scheduling router to a default L3 agent. Could be:
neutron.scheduler.l3_agent_scheduler.ChanceScheduler to schedule a router in a random way
neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler to allocate on an L3 agent with the least number of routers bound.
Defaults to: neutron.scheduler.l3_agent_scheduler.ChanceScheduler
``mysql_module``
(optional) Deprecated. Does nothing.
``router_distributed``
(optional) Setting the "router_distributed" flag to "True" will default to the creation
of distributed tenant routers.
Also can be the type of the router on the create request (admin-only attribute).
Defaults to false
``l3_ha``
(optional) Enable high availability for virtual routers.
Defaults to false
``max_l3_agents_per_router``
(optional) Maximum number of l3 agents which a HA router will be scheduled on. If set to '0', a router will be scheduled on every agent.
Defaults to '3'
``min_l3_agents_per_router``
(optional) Minimum number of l3 agents which a HA router will be scheduled on.
Defaults to '2'
``l3_ha_net_cidr``
(optional) CIDR of the administrative network if HA mode is enabled.
Defaults to '169.254.192.0/18'

View File

@ -0,0 +1,10 @@
# Solar 'remove' action: stop the neutron-server service and purge its
# package. The auth password is unused once the service is removed.
class { 'neutron::server':
enabled => false,
package_ensure => 'absent',
auth_password => 'not important as removed',
}
# Remove external class dependency
# (collector override: drop the service's require so removal does not
# depend on resources this standalone action does not manage)
Service <| title == 'neutron-server' |> {
require => undef
}

View File

@ -0,0 +1,92 @@
# Solar 'run' action: configure and enable the neutron-server API service.
# All inputs are read from hiera under this resource's name and passed
# through to the upstream neutron::server class nearly one-for-one; the
# only derived value is database_connection, assembled from the db_* inputs.
$resource = hiera($::resource_name)
$ip = $resource['input']['ip']['value']
$db_user = $resource['input']['db_user']['value']
$db_host = $resource['input']['db_host']['value']
$db_password = $resource['input']['db_password']['value']
$db_name = $resource['input']['db_name']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$auth_password = $resource['input']['auth_password']['value']
$auth_type = $resource['input']['auth_type']['value']
$auth_host = $resource['input']['auth_host']['value']
$auth_port = $resource['input']['auth_port']['value']
$auth_admin_prefix = $resource['input']['auth_admin_prefix']['value']
$auth_tenant = $resource['input']['auth_tenant']['value']
$auth_user = $resource['input']['auth_user']['value']
$auth_protocol = $resource['input']['auth_protocol']['value']
$auth_uri = $resource['input']['auth_uri']['value']
$database_max_retries = $resource['input']['database_max_retries']['value']
$database_idle_timeout = $resource['input']['database_idle_timeout']['value']
$database_retry_interval = $resource['input']['database_retry_interval']['value']
$database_min_pool_size = $resource['input']['database_min_pool_size']['value']
$database_max_pool_size = $resource['input']['database_max_pool_size']['value']
$database_max_overflow = $resource['input']['database_max_overflow']['value']
$sync_db = $resource['input']['sync_db']['value']
$api_workers = $resource['input']['api_workers']['value']
$rpc_workers = $resource['input']['rpc_workers']['value']
$agent_down_time = $resource['input']['agent_down_time']['value']
$router_scheduler_driver = $resource['input']['router_scheduler_driver']['value']
$router_distributed = $resource['input']['router_distributed']['value']
$l3_ha = $resource['input']['l3_ha']['value']
$max_l3_agents_per_router = $resource['input']['max_l3_agents_per_router']['value']
$min_l3_agents_per_router = $resource['input']['min_l3_agents_per_router']['value']
$l3_ha_net_cidr = $resource['input']['l3_ha_net_cidr']['value']
$mysql_module = $resource['input']['mysql_module']['value']
$sql_max_retries = $resource['input']['sql_max_retries']['value']
$max_retries = $resource['input']['max_retries']['value']
$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value']
$idle_timeout = $resource['input']['idle_timeout']['value']
$sql_reconnect_interval = $resource['input']['sql_reconnect_interval']['value']
$retry_interval = $resource['input']['retry_interval']['value']
$log_dir = $resource['input']['log_dir']['value']
$log_file = $resource['input']['log_file']['value']
$report_interval = $resource['input']['report_interval']['value']
class { 'neutron::server':
enabled => true,
manage_service => true,
# Full SQLAlchemy URL is built here because the resource schema only
# carries the decomposed db_* pieces.
database_connection => "mysql://${db_user}:${db_password}@${db_host}/${db_name}",
package_ensure => $package_ensure,
auth_password => $auth_password,
auth_type => $auth_type,
auth_host => $auth_host,
auth_port => $auth_port,
auth_admin_prefix => $auth_admin_prefix,
auth_tenant => $auth_tenant,
auth_user => $auth_user,
auth_protocol => $auth_protocol,
auth_uri => $auth_uri,
database_max_retries => $database_max_retries,
database_idle_timeout => $database_idle_timeout,
database_retry_interval => $database_retry_interval,
database_min_pool_size => $database_min_pool_size,
database_max_pool_size => $database_max_pool_size,
database_max_overflow => $database_max_overflow,
sync_db => $sync_db,
api_workers => $api_workers,
rpc_workers => $rpc_workers,
agent_down_time => $agent_down_time,
router_scheduler_driver => $router_scheduler_driver,
router_distributed => $router_distributed,
l3_ha => $l3_ha,
max_l3_agents_per_router => $max_l3_agents_per_router,
min_l3_agents_per_router => $min_l3_agents_per_router,
l3_ha_net_cidr => $l3_ha_net_cidr,
mysql_module => $mysql_module,
sql_max_retries => $sql_max_retries,
max_retries => $max_retries,
sql_idle_timeout => $sql_idle_timeout,
idle_timeout => $idle_timeout,
sql_reconnect_interval => $sql_reconnect_interval,
retry_interval => $retry_interval,
log_dir => $log_dir,
log_file => $log_file,
report_interval => $report_interval,
}
# Remove external class dependency
# (collector override: the service must not require resources that this
# standalone action does not manage)
Service <| title == 'neutron-server' |> {
require => undef
}

View File

@ -0,0 +1,142 @@
# Solar resource metadata for the neutron-server API service.
# Each input below surfaces as $resource['input'][<name>]['value'] in the
# puppet action manifests; a trailing '!' in a schema marks a required value.
handler: puppet
id: 'neutron_server_puppet'
input:
ip:
schema: str!
value:
ssh_key:
schema: str!
value:
ssh_user:
schema: str!
value:
# Decomposed pieces of database_connection (mysql://user:pass@host/name);
# the run.pp action reassembles them into the full URL.
db_user:
schema: str!
value:
db_password:
schema: str!
value:
db_name:
schema: str!
value:
db_host:
schema: str!
value:
package_ensure:
schema: str
value: 'present'
auth_password:
schema: str!
value:
auth_type:
schema: str
value: 'keystone'
auth_host:
schema: str
value: 'localhost'
auth_port:
schema: int
value: 35357
auth_admin_prefix:
schema: str
value:
auth_tenant:
schema: str
value: 'services'
auth_user:
schema: str
value: 'neutron'
auth_protocol:
schema: str
value: 'http'
auth_uri:
schema: str
value:
database_max_retries:
schema: int
value: 10
database_idle_timeout:
schema: int
value: 3600
database_retry_interval:
schema: int
value: 10
database_min_pool_size:
schema: int
value: 1
database_max_pool_size:
schema: int
value: 10
database_max_overflow:
schema: int
value: 20
sync_db:
schema: bool
value: false
api_workers:
schema: int
value: 1
rpc_workers:
schema: int
value: 1
agent_down_time:
schema: int
value: 75
router_scheduler_driver:
schema: str
value: 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
router_distributed:
schema: bool
value: false
l3_ha:
schema: bool
value: false
max_l3_agents_per_router:
schema: int
value: 3
min_l3_agents_per_router:
schema: int
value: 2
l3_ha_net_cidr:
schema: str
value: '169.254.192.0/18'
mysql_module:
schema: str
value:
# NOTE(review): the deprecated/tuning inputs below use "schema: str" even
# though the puppet class takes integers — presumably to allow empty
# (unset) values; confirm the handler coerces them as intended.
sql_max_retries:
schema: str
value:
max_retries:
schema: str
value:
sql_idle_timeout:
schema: str
value:
idle_timeout:
schema: str
value:
sql_reconnect_interval:
schema: str
value:
retry_interval:
schema: str
value:
log_dir:
schema: str
value:
log_file:
schema: str
value:
report_interval:
schema: str
value:
# Upstream puppet module, pinned to the 5.1.0 (Juno) tag.
git:
schema: {repository: str!, branch: str!}
value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'}
puppet_module: 'neutron'
tags: [resource/neutron, resource/neutron_service, resource/neutron_server, resource/neutron_api]
version: 1.0.0

View File

@ -32,6 +32,9 @@ source https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/network/neu
``neutron_url``
(optional) URL for connecting to the Neutron networking service.
Defaults to 'http://127.0.0.1:9696'
Note: for this resource it is decomposed to the
'neutron_endpoint_host', 'neutron_endpoint_port', 'neutron_endpoint_protocol' inputs
due to implementation limitations
``neutron_url_timeout``
(optional) Timeout value for connecting to neutron in seconds.
@ -73,6 +76,9 @@ source https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/network/neu
This is the Identity (keystone) admin API server IP and port value,
and not the Identity service API IP and port.
Defaults to 'http://127.0.0.1:35357/v2.0'
Note: for this resource it is decomposed to the
'auth_host', 'auth_port', 'auth_protocol' inputs
due to implementation limitations
``network_api_class``
(optional) The full class name of the network API class.

View File

@ -1,16 +1,21 @@
$resource = hiera($::resource_name)
$auth_host = $resource['input']['auth_host']['value']
$auth_port = $resource['input']['auth_port']['value']
$auth_protocol = $resource['input']['auth_protocol']['value']
$neutron_endpoint_host = $resource['input']['neutron_endpoint_host']['value']
$neutron_endpoint_port = $resource['input']['neutron_endpoint_port']['value']
$neutron_endpoint_protocol = $resource['input']['neutron_endpoint_protocol']['value']
$libvirt_vif_driver = $resource['input']['libvirt_vif_driver']['value']
$force_snat_range = $resource['input']['force_snat_range']['value']
$neutron_admin_password = $resource['input']['neutron_admin_password']['value']
$neutron_auth_strategy = $resource['input']['neutron_auth_strategy']['value']
$neutron_url = $resource['input']['neutron_url']['value']
$neutron_url_timeout = $resource['input']['neutron_url_timeout']['value']
$neutron_admin_tenant_name = $resource['input']['neutron_admin_tenant_name']['value']
$neutron_default_tenant_id = $resource['input']['neutron_default_tenant_id']['value']
$neutron_region_name = $resource['input']['neutron_region_name']['value']
$neutron_admin_username = $resource['input']['neutron_admin_username']['value']
$neutron_admin_auth_url = $resource['input']['neutron_admin_auth_url']['value']
$neutron_ovs_bridge = $resource['input']['neutron_ovs_bridge']['value']
$neutron_extension_sync_interval = $resource['input']['neutron_extension_sync_interval']['value']
$neutron_ca_certificates_file = $resource['input']['neutron_ca_certificates_file']['value']
@ -30,13 +35,13 @@ class { 'nova::compute::neutron':
class { 'nova::network::neutron':
neutron_admin_password => $neutron_admin_password,
neutron_auth_strategy => $neutron_auth_strategy,
neutron_url => $neutron_url,
neutron_url => "${neutron_endpoint_protocol}://${neutron_endpoint_host}:${neutron_endpoint_port}",
neutron_url_timeout => $neutron_url_timeout,
neutron_admin_tenant_name => $neutron_admin_tenant_name,
neutron_default_tenant_id => $neutron_default_tenant_id,
neutron_region_name => $neutron_region_name,
neutron_admin_username => $neutron_admin_username,
neutron_admin_auth_url => $neutron_admin_auth_url,
neutron_admin_auth_url => "${auth_protocol}://${auth_host}:${auth_port}/v2.0",
neutron_ovs_bridge => $neutron_ovs_bridge,
neutron_extension_sync_interval => $neutron_extension_sync_interval,
neutron_ca_certificates_file => $neutron_ca_certificates_file,

View File

@ -3,6 +3,25 @@ handler: puppet
puppet_module: nova
version: 1.0.0
input:
auth_host:
schema: str
value: 'localhost'
auth_port:
schema: int
value: 35357
auth_protocol:
schema: str
value: 'http'
neutron_endpoint_host:
schema: str
value: 'localhost'
neutron_endpoint_port:
schema: int
value: 9696
neutron_endpoint_protocol:
schema: str
value: 'http'
libvirt_vif_driver:
schema: str
value: 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver'
@ -15,9 +34,6 @@ input:
neutron_auth_strategy:
schema: str
value: 'keystone'
neutron_url:
schema: str
value: 'http://127.0.0.1:9696'
neutron_url_timeout:
schema: int
value: 30
@ -33,9 +49,6 @@ input:
neutron_admin_username:
schema: str
value: 'neutron'
neutron_admin_auth_url:
schema: str
value: 'http://127.0.0.1:35357/v2.0'
neutron_ovs_bridge:
schema: str
value: 'br-int'

View File

@ -5,7 +5,11 @@
- shell: killall -u riak
ignore_errors: yes
# remove the lines above once this is no longer used for testing
# we install the Ubuntu package repository here;
# NOT recommended in production
- shell: curl -s https://packagecloud.io/install/repositories/basho/riak/script.deb.sh | sudo bash
- apt:
name: riak
state: present

View File

@ -0,0 +1,12 @@
- hosts: [{{host}}]
sudo: yes
tasks:
- service:
name: riak
state: stopped
- template:
src: {{ resource_dir }}/templates/riak.conf
dest: /etc/riak/riak.conf
- service:
name: riak
state: reloaded

View File

@ -0,0 +1,12 @@
---
# TODO: this shouldn't be outside of the resource directory
- hosts: all
sudo: yes
tasks:
- script: /vagrant/bootstrap/playbooks/files/ubuntu-ansible.sh
- include: /vagrant/bootstrap/playbooks/tasks/cinder.yml
#- include: celery.yml tags=['master'] celery_dir=/var/run/celery
- include: /vagrant/bootstrap/playbooks/build-main.yml
- include: /vagrant/bootstrap/playbooks/custom-configs.yml master_ip={{ master_ip }}
- include: /vagrant/bootstrap/playbooks/celery.yml tags=slave

View File

@ -0,0 +1,18 @@
handler: ansible
id: 'solar_bootstrap'
input:
ip:
schema: str!
value:
ssh_key:
schema: str!
value:
ssh_user:
schema: str!
value:
master_ip:
schema: str!
value:
tags: []
version: 1.0.0

77
snapshotter.py Normal file
View File

@ -0,0 +1,77 @@
import click
import time
from itertools import takewhile
from subprocess import check_output
def get_vagrant_vms():
    """Return the names of the VMs reported by ``vagrant status``.

    Parses the table printed by ``vagrant status``: the first two header
    lines are skipped, then one VM per line until the first blank line.

    :returns: list of VM name strings. A concrete list is returned on
        both Python 2 and 3 (the previous ``takewhile``/``map`` pipeline
        yielded a lazy iterator on Python 3 and split every line twice).
    """
    table = vagrant('status').splitlines()[2:]  # drop the two header lines
    names = []
    for line in table:
        fields = line.split()
        if not fields:  # a blank line terminates the VM table
            break
        names.append(fields[0])  # first column is the VM name
    return names
def vboxmanage(*args):
    """Run ``VBoxManage`` with the given arguments and return its stdout."""
    return check_output(('VBoxManage',) + args, shell=False)
def vagrant(*args):
    """Run ``vagrant`` with the given arguments and return its stdout."""
    return check_output(('vagrant',) + args, shell=False)
@click.group()
def cli():
    """Entry point: Click command group for VirtualBox snapshot management."""
    pass
@cli.command()
@click.option('-n', default=None)
def take(n):
    """Take a live snapshot of every vagrant-managed VM.

    The snapshot is named ``n``; when ``-n`` is not given, a name of the
    form ``solar-<epoch>`` is generated from the current time.
    """
    timestamp = time.time()
    name = n if n is not None else 'solar-%d' % timestamp
    for vm in get_vagrant_vms():
        click.echo("Taking %s" % vm)
        output = vboxmanage('snapshot', vm, 'take', name,
                            '--live', '--description', 'solar: %d' % timestamp)
        click.echo(output)
# Decorator order normalized so @cli.command() is outermost, matching
# `take` (click tolerates either order, but this is the documented one).
@cli.command()
@click.option('-n')
def restore(n):
    """Restore snapshot ``-n`` on every vagrant-managed VM."""
    for vm in get_vagrant_vms():
        click.echo("Restoring %s" % vm)
        snap = vboxmanage('snapshot', vm, 'restore', n)
        click.echo(snap)
# Named `show` rather than `list` — presumably because `list` would
# shadow the Python builtin (original comment was truncated).
@cli.command()
def show():
    """Print every snapshot of every vagrant-managed VM."""
    for vm in get_vagrant_vms():
        click.echo("VM: %s" % vm)
        click.echo(vboxmanage('snapshot', vm, 'list'))
        click.echo('-' * 10)
# Decorator order normalized so @cli.command() is outermost, matching
# `take` (click tolerates either order, but this is the documented one).
@cli.command()
@click.option('-n')
def delete(n):
    """Delete snapshot ``-n`` from every vagrant-managed VM."""
    for vm in get_vagrant_vms():
        click.echo('Removing %s from %s' % (n, vm))
        snap = vboxmanage('snapshot', vm, 'delete', n)
        click.echo(snap)
# Dispatch to the Click command group when run as a script.
if __name__ == '__main__':
    cli()

View File

@ -26,13 +26,28 @@ def validate():
@changes.command()
def stage():
@click.option('-d', default=False, is_flag=True)
def stage(d):
log = list(change.stage_changes().reverse())
for item in log:
click.echo(item)
if d:
for line in item.details:
click.echo(' '*4+line)
if not log:
click.echo('No changes')
@changes.command(name='staged-item')
@click.argument('log_action')
@click.option('-d', default=True, is_flag=True)
def staged_item(log_action, d):
item = data.SL().get(log_action)
if not item:
click.echo('No staged changes for {}'.format(log_action))
else:
click.echo(item)
for line in item.details:
click.echo(' '*4+line)
@changes.command()
def process():

View File

@ -2,7 +2,7 @@ import click
import os
import re
uids_history = os.path.join(os.getcwd(), '.solar_cli_uids')
UIDS_HISTORY = os.path.join(os.getcwd(), '.solar_cli_uids')
def remember_uid(uid):
@ -11,14 +11,14 @@ def remember_uid(uid):
Can be used then as `last`, `last1`, `last2` anywhere
"""
try:
with open(uids_history, 'rb') as f:
with open(UIDS_HISTORY, 'rb') as f:
hist = [x.strip() for x in f.readlines()]
except IOError:
hist = []
hist.insert(0, uid)
if len(hist) > 3:
hist = hist[:3]
with open(uids_history, 'wb') as f:
with open(UIDS_HISTORY, 'wb') as f:
f.write('\n'.join(hist))
@ -32,7 +32,7 @@ def get_uid(given_uid):
position = int(matched.group(1))
except ValueError:
position = 0
with open(uids_history, 'rb') as f:
with open(UIDS_HISTORY, 'rb') as f:
uids = [x.strip() for x in f.readlines()]
try:
return uids[position]

View File

@ -27,10 +27,6 @@ def create_resource(name, base_path, args, virtual_resource=None):
prepare_meta(metadata)
if os.path.exists(actions_path):
for f in os.listdir(actions_path):
metadata['actions'][os.path.splitext(f)[0]] = f
tags = metadata.get('tags', [])
resource = Resource(name, metadata, args, tags, virtual_resource)

View File

@ -8,7 +8,6 @@ from solar.interfaces.db import get_db
from solar.events.api import add_events
from solar.events.controls import Dependency
db = get_db()
@ -176,7 +175,6 @@ def connect(emitter, receiver, mapping=None, events=None):
# receiver.save()
def disconnect(emitter, receiver):
# convert if needed
# TODO: handle invalid resource

View File

@ -63,6 +63,20 @@ class LogItem(object):
def compact(self):
return 'log task={} uid={}'.format(self.log_action, self.uid)
@property
def details(self):
    # Human-readable lines describing this log item's diff.
    # Assumes `self.diff` is a dictdiffer-style list of
    # (type, path, change) triples — TODO confirm against the producer.
    rst = []
    for type_, val, change in self.diff:
        if type_ == 'add':
            for it in change:
                if isinstance(it, dict):
                    # NOTE(review): indexing a dict with [0]/[1] looks
                    # suspicious — verify `it` really is a dict here and
                    # not a (key, value) pair.
                    rst.append('++ {}: {}'.format(it[0], it[1]['value']))
                else:
                    rst.append('++ {}: {}'.format(it[0], str(it[1])))
        elif type_ == 'change':
            rst.append('-+ {}: {} >> {}'.format(val, change[0], change[1]))
    return rst
class Log(object):

View File

@ -83,3 +83,4 @@ def test_riak():
evapi.build_edges(changed, changes_graph, events)
assert nx.topological_sort(changes_graph) == [
'riak_service1.run', 'riak_service2.join', 'riak_service3.join', 'riak_service1.commit']

View File

@ -0,0 +1,6 @@
# rename this file to vagrant-settings.yml and the Vagrantfile
# will use the values from it
slaves_count: 2
slaves_ram: 1024
master_ram: 1024