---
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Verbosity Options
debug: False
## Octavia standalone (v2) experimental
octavia_v2: True
## Activate Octavia V1 API
octavia_v1: False
## Allow TLS listener
octavia_tls_listener_enabled: True
# Legacy policy disables the requirement for load-balancer service users to
# have one of the load-balancer:* roles. It provides a similar policy to
# legacy OpenStack policies where any user or admin has access to load-balancer
# resources that they own. Users with the admin role have access to all
# load-balancer resources, whether they own them or not.
octavia_legacy_policy: False
# Set the package install state for distribution and pip packages
# Options are 'present' and 'latest'
octavia_package_state: "latest"
octavia_pip_package_state: "latest"
octavia_git_repo: https://git.openstack.org/openstack/octavia
octavia_git_install_branch: master
octavia_developer_mode: false
octavia_developer_constraints:
- "git+{{ octavia_git_repo }}@{{ octavia_git_install_branch }}#egg=octavia"
# Name of the virtual env to deploy into
octavia_venv_tag: untagged
octavia_bin: "/openstack/venvs/octavia-{{ octavia_venv_tag }}/bin"
octavia_venv_download_url: http://127.0.0.1/venvs/untagged/ubuntu/octavia.tgz
octavia_clients_endpoint: internalURL
octavia_auth_strategy: keystone
## Database info
octavia_galera_user: octavia
octavia_galera_database: octavia
octavia_galera_use_ssl: "{{ galera_use_ssl | default(False) }}"
octavia_galera_ssl_ca_cert: "{{ galera_ssl_ca_cert | default('/etc/ssl/certs/galera-ca.pem') }}"
octavia_db_max_overflow: 20
octavia_db_pool_size: 120
octavia_db_pool_timeout: 30
## Oslo Messaging
# RPC
octavia_oslomsg_rpc_transport: rabbit
octavia_oslomsg_rpc_servers: 127.0.0.1
octavia_oslomsg_rpc_port: 5672
octavia_oslomsg_rpc_use_ssl: False
octavia_oslomsg_rpc_userid: octavia
octavia_oslomsg_rpc_vhost: /octavia
# Notify
octavia_oslomsg_notify_transport: rabbit
octavia_oslomsg_notify_servers: 127.0.0.1
octavia_oslomsg_notify_port: 5672
octavia_oslomsg_notify_use_ssl: False
octavia_oslomsg_notify_userid: octavia
octavia_oslomsg_notify_vhost: /octavia
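# For illustration only (the exact rendering lives in the role's templates, so
# treat the format as an assumption): with the defaults above, the generated RPC
# transport_url would look roughly like
#   rabbit://octavia:<octavia_oslomsg_rpc_password>@127.0.0.1:5672//octavia
# with each additional entry in octavia_oslomsg_rpc_servers appended as another
# comma-separated userid:password@host:port element before the vhost.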
## octavia User / Group
octavia_system_user_name: octavia
octavia_system_group_name: octavia
octavia_system_shell: /bin/false
octavia_system_comment: octavia system user
octavia_system_home_folder: "/var/lib/{{ octavia_system_user_name }}"
## Auth
octavia_service_region: RegionOne
octavia_service_project_name: "service"
octavia_service_user_name: "octavia"
octavia_service_role_name: admin
octavia_service_project_domain_id: default
octavia_service_user_domain_id: default
octavia_keystone_auth_plugin: "{{ octavia_keystone_auth_type }}"
octavia_keystone_auth_type: password
octavia_ansible_endpoint_type: "internal" # endpoint for ansible
## octavia api service type and data
octavia_service_name: octavia
octavia_service_description: "Octavia Load Balancing Service"
octavia_service_port: 9876
octavia_service_proto: http
octavia_service_publicuri_proto: "{{ openstack_service_publicuri_proto | default(octavia_service_proto) }}"
octavia_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(octavia_service_proto) }}"
octavia_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(octavia_service_proto) }}"
octavia_service_type: load-balancer
octavia_service_publicuri: "{{ octavia_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ octavia_service_port }}"
octavia_service_adminuri: "{{ octavia_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ octavia_service_port }}"
octavia_service_internaluri: "{{ octavia_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ octavia_service_port }}"
octavia_service_in_ldap: false
## RPC
octavia_rpc_thread_pool_size: 64
octavia_rpc_conn_pool_size: 30
## Plugin dirs
octavia_plugin_dirs:
- /usr/lib/octavia
- /usr/local/lib/octavia
# octavia packages that must be installed before anything else
octavia_requires_pip_packages:
- httplib2
- python-keystoneclient # Keystoneclient needed by the OSA keystone lib
- shade
- virtualenv
- python-openstackclient
# Common pip packages
octavia_pip_packages:
- cryptography
- keystonemiddleware
- PyMySQL
- python-glanceclient
- python-keystoneclient
- python-memcached
- python-neutronclient
- python-novaclient
- python-openstackclient
- python-octaviaclient
- octavia
- uwsgi
octavia_api_init_overrides: {}
octavia_worker_init_overrides: {}
octavia_housekeeping_init_overrides: {}
octavia_health_manager_init_overrides: {}
## Service Name-Group Mapping
octavia_services:
octavia-api:
group: octavia_api
service_name: octavia-api
init_config_overrides: "{{ octavia_api_init_overrides }}"
wsgi_overrides: "{{ octavia_api_uwsgi_ini_overrides }}"
wsgi_app: True
log_string: "--logto "
wsgi_name: octavia-wsgi
uwsgi_port: "{{ octavia_service_port }}"
uwsgi_bind_address: "{{ octavia_uwsgi_bind_address }}"
program_override: "{{ octavia_bin }}/uwsgi --ini /etc/uwsgi/octavia-api.ini"
octavia-worker:
group: octavia_worker
service_name: octavia-worker
init_config_overrides: "{{ octavia_worker_init_overrides }}"
octavia-housekeeping:
group: octavia_housekeeping
service_name: octavia-housekeeping
init_config_overrides: "{{ octavia_housekeeping_init_overrides }}"
octavia-health-manager:
group: octavia_health_manager
service_name: octavia-health-manager
init_config_overrides: "{{ octavia_health_manager_init_overrides }}"
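# The *_init_overrides dicts referenced above are applied to the service unit
# files via config_template; a hypothetical override (section and value chosen
# purely for illustration) would look like:
# octavia_api_init_overrides:
#   Service:
#     LimitNOFILE: 2048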
# Required secrets for the role
octavia_required_secrets:
- keystone_auth_admin_password
- octavia_container_mysql_password
- octavia_oslomsg_rpc_password
- octavia_oslomsg_notify_password
- octavia_service_password
- memcached_encryption_key
# This variable is used by the repo_build process to determine
# which host group to check for members of before building the
# pip packages required by this role. The value is picked up
# by the py_pkgs lookup.
octavia_role_project_group: octavia_all
## Octavia configs
# Load balancer topology options are SINGLE, ACTIVE_STANDBY
# ACTIVE_STANDBY is recommended for production settings
octavia_loadbalancer_topology: SINGLE
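# For production you would typically override this in user_variables.yml, e.g.:
# octavia_loadbalancer_topology: ACTIVE_STANDBY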
# Image tag for the amphora image in glance
octavia_glance_image_tag: octavia-amphora-image
# add here the ID of the image owner to avoid fake images being used
octavia_amp_image_owner_id:
# add here the glance image id if tagging is not used (not recommended for prod)
octavia_amp_image_id:
# download the image from an artefact server
# Note: The default is the Octavia test image so don't use that in prod
octavia_download_artefact: True
# The host to download images to if enabled
# Options are ['deployment-host', 'target-host']
octavia_image_downloader: "deployment-host"
# The URL to download from
octavia_artefact_url: http://tarballs.openstack.org/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-xenial.qcow2
# the directory to store the downloaded file in
octavia_amp_image_path: "~/"
# add here the file name of the image if it should be uploaded automatically
octavia_amp_image_file_name:
# enable uploading image to glance automatically
octavia_amp_image_upload_enabled: "{{ octavia_download_artefact }}"
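# Example override (paths and values are illustrative, not defaults) for
# uploading a locally built amphora image instead of downloading the test image:
# octavia_download_artefact: False
# octavia_amp_image_upload_enabled: True
# octavia_amp_image_file_name: "/var/tmp/amphora-x64-haproxy.qcow2"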
# Name of the Octavia security group
octavia_security_group_name: octavia_sec_grp
# Restrict access to only authorized hosts
octavia_security_group_rule_cidr:
# ssh enabled - switch to True if you need ssh access to the amphora
# and make sure to upload a key with the name below
octavia_ssh_enabled: False
octavia_ssh_key_name: octavia_key
# port the agent listens on
octavia_agent_port: "9443"
octavia_health_manager_port: 5555
# Octavia Nova flavor
octavia_amp_flavor_name: "m1.amphora"
octavia_amp_ram: 1024
octavia_amp_vcpu: 1
octavia_amp_disk: 2
# spare pool - increase to speed up load balancer creation and failover
octavia_spare_amphora_pool_size: 1
# only increase on a really busy system since this value applies per deployed host,
# e.g. 3 hosts with 5 workers (this param) per host results in 15 workers total
octavia_task_flow_max_workers: 5
# event_streamer - set to True if you are using neutron lbaas with Octavia
# (Octavia will stream events to the neutron DB)
octavia_event_streamer: False
# Enable provisioning status sync with neutron db
octavia_sync_provisioning_status: False
# OSA is architected to use vHosts for queues so we need to post events
# into the Neutron queue for them to be picked up
neutron_oslomsg_rpc_userid: neutron
neutron_oslomsg_rpc_vhost: /neutron
neutron_oslomsg_rpc_transport: rabbit
neutron_oslomsg_rpc_port: 5672
neutron_oslomsg_rpc_servers: 127.0.0.1
neutron_oslomsg_rpc_use_ssl: False
neutron_oslomsg_rpc_password: changeme
# For additional security use a different user on the Neutron queue
# for Octavia with restricted access to only the event streamer
# queues
octavia_neutron_oslomsg_rpc_userid: "{{ neutron_oslomsg_rpc_userid }}"
octavia_neutron_oslomsg_rpc_password: "{{ neutron_oslomsg_rpc_password }}"
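# Example (the user name and the referenced secret are placeholders, not
# variables defined by this role) of using a dedicated, restricted user on the
# Neutron queue instead of reusing the Neutron credentials:
# octavia_neutron_oslomsg_rpc_userid: octavia-events
# octavia_neutron_oslomsg_rpc_password: "{{ octavia_neutron_events_password }}"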
# this controls whether Octavia should add an anti-affinity hint to make sure
# two amphorae are not placed on the same host (the most common setup of the
# anti-affinity feature in Nova).
# Since an AIO only has one compute host this is set to False
octavia_enable_anti_affinity: False
# Some installations put hardware more suited for load balancing in special
# availability zones. This allows targeting a specific availability zone
# for amphora creation
#octavia_amp_availability_zone:
# List of haproxy template files to copy from deployment host to octavia hosts
# octavia_user_haproxy_templates:
# - src: "/etc/openstack_deploy/octavia/haproxy_templates/base.cfg.j2"
# dest: "/etc/octavia/templates/base.cfg.j2"
# - src: "/etc/openstack_deploy/octavia/haproxy_templates/haproxy.cfg.j2"
# dest: "/etc/octavia/templates/haproxy.cfg.j2"
# - src: "/etc/openstack_deploy/octavia/haproxy_templates/macros.cfg.j2"
# dest: "/etc/octavia/templates/macros.cfg.j2"
octavia_user_haproxy_templates: {}
# Path of custom haproxy template file
#octavia_haproxy_amphora_template: /etc/octavia/templates/haproxy.cfg.j2
# Name of the Octavia management network in Neutron
octavia_neutron_management_network_name: lbaas-mgmt
# Name of the provider net in the system
octavia_provider_network_name: lbaas
# Network type
octavia_provider_network_type: flat
# Network segmentation ID if vlan, gre...
#octavia_provider_segmentation_id:
# Network CIDR
octavia_management_net_subnet_cidr: 172.29.232.0/22
# Example allocation range:
# octavia_management_net_subnet_allocation_pools: "172.29.232.10-172.29.235.200"
octavia_management_net_subnet_allocation_pools: ""
# Do we require the Neutron DHCP server
octavia_management_net_dhcp: "True"
# Should Octavia set up the network and subnet?
octavia_service_net_setup: True
# This sets it to the container management network based on how you set up
# the provider net
octavia_container_network_name: "{{ octavia_provider_network_name }}_address"
octavia_provider_network: "{{ provider_networks|map(attribute='network')|selectattr('net_name','defined')|selectattr('net_name', 'equalto', octavia_provider_network_name)|list|first }}"
octavia_hm_group: "octavia-health-manager"
# Note: We use some heuristics here, but if you do anything special make sure to use the
# IP addresses on the right network. This will use the container networking to figure out the IPs
octavia_hm_hosts: "{% for host in groups[octavia_hm_group] %}{{ hostvars[host]['container_networks'][octavia_container_network_name]['address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
# Set this to the right container interface, i.e. the eth device connected to the Octavia
# management network (see the example provider network entry below)
octavia_container_interface: "{{ octavia_provider_network.container_interface }}"
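# For reference, an openstack_user_config provider_networks entry that the
# lookup above would match could look like this (values are illustrative):
# provider_networks:
#   - network:
#       net_name: lbaas
#       container_bridge: br-lbaas
#       container_interface: eth14
#       container_type: veth
#       type: flat
#       ip_from_q: octavia
#       group_binds:
#         - neutron_linuxbridge_agent
#         - octavia-worker
#         - octavia-housekeeping
#         - octavia-health-manager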
# Set this to True to apply the iptables firewall rules below
octavia_ip_tables_fw: True
# The iptables rules
octavia_iptables_rules:
- # Allow icmp
chain: INPUT
protocol: icmp
ctstate: NEW
icmp_type: 8
jump: ACCEPT
- # Allow existing connections:
chain: INPUT
in_interface: "{{ octavia_container_interface }}"
ctstate: RELATED,ESTABLISHED
jump: ACCEPT
- # Allow heartbeat:
chain: INPUT
in_interface: "{{ octavia_container_interface }}"
protocol: udp
destination_port: "{{ octavia_health_manager_port }}"
jump: ACCEPT
- # Reject INPUT:
chain: INPUT
in_interface: "{{ octavia_container_interface }}"
reject_with: icmp-port-unreachable
- # Reject FORWARD:
chain: FORWARD
in_interface: "{{ octavia_container_interface }}"
reject_with: icmp-port-unreachable
- # Allow icmp6
chain: INPUT
protocol: icmpv6
jump: ACCEPT
ip_version: ipv6
- # Allow existing connections
chain: INPUT
in_interface: "{{ octavia_container_interface }}"
ctstate: RELATED,ESTABLISHED
jump: ACCEPT
ip_version: ipv6
- # Allow heartbeat
chain: INPUT
in_interface: "{{ octavia_container_interface }}"
protocol: udp
destination_port: "{{ octavia_health_manager_port }}"
jump: ACCEPT
ip_version: ipv6
- # Reject INPUT
chain: INPUT
in_interface: "{{ octavia_container_interface }}"
reject_with: icmp6-port-unreachable
ip_version: ipv6
- # Reject FORWARD
chain: FORWARD
in_interface: "{{ octavia_container_interface }}"
reject_with: icmp6-port-unreachable
ip_version: ipv6
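# Note that overriding octavia_iptables_rules in user_variables replaces the
# whole list, so any additional rule has to be supplied alongside copies of the
# entries above; an extra ACCEPT entry would look like (illustrative only):
# - chain: INPUT
#   in_interface: "{{ octavia_container_interface }}"
#   protocol: tcp
#   destination_port: "22"
#   jump: ACCEPT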
# uWSGI Settings
octavia_wsgi_processes_max: 16
octavia_wsgi_processes: "{{ [[ansible_processor_vcpus|default(1), 1] | max * 2, octavia_wsgi_processes_max] | min }}"
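# Worked example: with 4 vCPUs this evaluates to min(max(4, 1) * 2, 16) = 8
# uWSGI processes; hosts with 8 or more vCPUs are capped at octavia_wsgi_processes_max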
octavia_wsgi_threads: 1
octavia_wsgi_buffer_size: 65535
octavia_uwsgi_bind_address: "0.0.0.0"
octavia_api_uwsgi_ini_overrides: {}
# Set up the drivers
octavia_amphora_driver: amphora_haproxy_rest_driver
octavia_compute_driver: compute_nova_driver
octavia_network_driver: allowed_address_pairs_driver
# Certificate generation
# this directory needs to be accessible
octavia_cert_dir: "{{ lookup('env', 'HOME') }}/openstack-ansible/octavia"
octavia_cert_key_length_server: '4096' # key length
octavia_cert_cipher_server: 'aes256'
octavia_cert_cipher_client: 'aes256'
octavia_cert_key_length_client: '4096' # key length
octavia_cert_server_ca_common_name: 'www.example.com' # change this to something more real
octavia_cert_client_ca_common_name: 'www.example.com' # change this to something more real
octavia_cert_client_req_common_name: 'www.example.com' # change this to something more real
octavia_generate_client_cert: True # generate self-signed client certs
octavia_generate_certs: True
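# Example production-style overrides for the common names above (values are
# placeholders, pick names matching your own PKI conventions):
# octavia_cert_server_ca_common_name: 'octavia-server-ca.example.org'
# octavia_cert_client_ca_common_name: 'octavia-client-ca.example.org'
# octavia_cert_client_req_common_name: 'octavia-client.example.org'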
# client certs
octavia_client_ca_key: "{{ octavia_cert_dir }}/ca_01.key"
octavia_client_ca: "{{ octavia_cert_dir }}/ca_01.pem"
octavia_client_cert: "{{ octavia_cert_dir }}/client.pem"
# server
octavia_server_ca: "{{ octavia_ca_certificate }}"
# ca certs
octavia_ca_private_key: "{{ octavia_cert_dir }}/private/cakey.pem"
octavia_ca_private_key_passphrase: "{{ octavia_cert_client_password }}"
octavia_ca_certificate: "{{ octavia_cert_dir }}/ca_server_01.pem"
octavia_signing_digest: sha256
## Tunable overrides
octavia_octavia_conf_overrides: {}
octavia_api_paste_ini_overrides: {}
octavia_policy_overrides: {}
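# The overrides dicts above use the config_template format: top-level keys are
# config file sections and nested keys become options. A hypothetical example
# (the value is chosen purely for illustration):
# octavia_octavia_conf_overrides:
#   haproxy_amphora:
#     connection_max_retries: 1500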