diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
index 2b8c14c70..62983c3ad 100644
--- a/ansible/ansible.cfg
+++ b/ansible/ansible.cfg
@@ -1,10 +1,11 @@
[defaults]
-gathering = smart
+callback_whitelist = profile_tasks
fact_caching_timeout = 86400
fact_caching = jsonfile
fact_caching_connection = /tmp/browbeat_fact_cache
+gathering = smart
+roles_path = ./browbeat/roles:./install/roles:
timeout = 30
-callback_whitelist = profile_tasks
[ssh_connection]
# Load the specific ssh config file in this directory
ssh_args = -F ssh-config
diff --git a/ansible/browbeat/adjustment-apache.yml b/ansible/browbeat/adjustment-apache.yml
new file mode 100644
index 000000000..9c854bf68
--- /dev/null
+++ b/ansible/browbeat/adjustment-apache.yml
@@ -0,0 +1,24 @@
+---
+#
+# Playbook to adjust Apache prefork settings
+#
+# Example:
+#
+# ansible-playbook -i hosts browbeat/adjustment-apache.yml -e 'httpd_startservers=8 httpd_minspareservers=5 httpd_maxspareservers=20 httpd_serverlimit=256 httpd_maxclients=256 httpd_maxrequestsperchild=4000'
+#
+
+- hosts: controller
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ vars:
+ # Defaults per Pike (OSP11)
+ httpd_startservers: 8
+ httpd_minspareservers: 5
+ httpd_maxspareservers: 20
+ httpd_serverlimit: 256
+ httpd_maxclients: 256
+ httpd_maxrequestsperchild: 4000
+ roles:
+ - apache-config
diff --git a/ansible/browbeat/adjustment-ceilometer.yml b/ansible/browbeat/adjustment-ceilometer.yml
index d4406457e..6ee6b8345 100644
--- a/ansible/browbeat/adjustment-ceilometer.yml
+++ b/ansible/browbeat/adjustment-ceilometer.yml
@@ -1,22 +1,67 @@
---
#
-# Playbook to apply changes to ceilometer.
+# Playbook to adjust Ceilometer configuration items and restart either the
+# notification agent or the collector.
#
-# Examples (backend):
-# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "ceilometer_backend=database"
-# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "ceilometer_backend=gnocchi"
+# Examples:
+# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "rabbit_qos_prefetch_count=64"
+# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "rabbit_qos_prefetch_count=64 executor_thread_pool_size=64"
+# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "rabbit_qos_prefetch_count=64 restart_notification=true"
+# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "rabbit_qos_prefetch_count=64 restart_collector=true"
#
-# Examples (interval):
-# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "ceilometer_interval=60"
-# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "ceilometer_interval=600"
+# * Note that not setting a variable leaves that configuration item unchanged. Setting no
+# variables and running the playbook sets all configuration items to their defaults (Ocata)
+#
+# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml
#
- hosts: controller
- remote_user: heat-admin
- roles:
- - { role: ceilometer-backend, when: ceilometer_backend is defined }
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ vars:
+ restart_notification: false
+ restart_collector: false
+ # Create initial blank configuration list
+ ceilometer_configuration: []
+ # Defaults
+ default_rabbit_qos_prefetch_count: 0
+ default_executor_thread_pool_size: 64
+ # Each configuration item needs to be a list so it can be merged
+ rabbit_qos_prefetch_count_item: []
+ executor_thread_pool_size_item: []
+ pre_tasks:
+ - name: Set default rabbit_qos_prefetch_count and executor_thread_pool_size
+ set_fact:
+ ceilometer_configuration:
+ - section: oslo_messaging_rabbit
+ option: rabbit_qos_prefetch_count
+ value: "{{default_rabbit_qos_prefetch_count}}"
+ - section: DEFAULT
+ option: executor_thread_pool_size
+ value: "{{default_executor_thread_pool_size}}"
+ when: rabbit_qos_prefetch_count is undefined and executor_thread_pool_size is undefined
+
+ - name: Set rabbit_qos_prefetch_count configuration for Ceilometer
+ set_fact:
+ rabbit_qos_prefetch_count_item:
+ - section: oslo_messaging_rabbit
+ option: rabbit_qos_prefetch_count
+ value: "{{rabbit_qos_prefetch_count}}"
+ when: rabbit_qos_prefetch_count is defined
+
+ - name: Set executor_thread_pool_size configuration for Ceilometer
+ set_fact:
+ executor_thread_pool_size_item:
+ - section: DEFAULT
+ option: executor_thread_pool_size
+ value: "{{executor_thread_pool_size}}"
+ when: executor_thread_pool_size is defined
+
+ - name: Merge configuration items
+ set_fact:
+ ceilometer_configuration: "{{ceilometer_configuration + rabbit_qos_prefetch_count_item + executor_thread_pool_size_item }}"
-- hosts: controller,compute
- remote_user: heat-admin
roles:
- - { role: ceilometer-interval, when: ceilometer_interval is defined }
+ - ceilometer-config
diff --git a/ansible/browbeat/adjustment-gnocchi-wsgi.yml b/ansible/browbeat/adjustment-gnocchi-wsgi.yml
new file mode 100644
index 000000000..704ab38ad
--- /dev/null
+++ b/ansible/browbeat/adjustment-gnocchi-wsgi.yml
@@ -0,0 +1,20 @@
+---
+#
+# Playbook to adjust Gnocchi API wsgi settings
+#
+# Examples:
+# ansible-playbook -i hosts browbeat/adjustment-gnocchi-wsgi.yml -e "gnocchi_api_processes=24"
+# ansible-playbook -i hosts browbeat/adjustment-gnocchi-wsgi.yml -e "gnocchi_api_processes=24 gnocchi_api_threads=6"
+#
+#
+
+- hosts: controller
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ vars:
+ gnocchi_api_processes: 12
+ gnocchi_api_threads: 1
+ roles:
+ - gnocchi-api-config
diff --git a/ansible/browbeat/adjustment-gnocchi.yml b/ansible/browbeat/adjustment-gnocchi.yml
new file mode 100644
index 000000000..bbffd7dc4
--- /dev/null
+++ b/ansible/browbeat/adjustment-gnocchi.yml
@@ -0,0 +1,43 @@
+---
+#
+# Playbook to adjust Gnocchi config options
+#
+# Example:
+#
+# ansible-playbook -i hosts browbeat/adjustment-gnocchi.yml -e 'metricd_workers=12 metric_processing_delay=60 processing_replicas=3'
+#
+
+- hosts: controller
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ vars:
+ metricd_workers: 12
+ metric_processing_delay: 60
+ processing_replicas: 3
+ tasks:
+ - name: Configure Gnocchi Options
+ become: true
+ ini_file:
+ dest: /etc/gnocchi/gnocchi.conf
+ mode: 0640
+ group: gnocchi
+ section: "{{item.section}}"
+ option: "{{item.option}}"
+ value: "{{item.value}}"
+ backup: yes
+ with_items:
+ - section: metricd
+ option: workers
+ value: "{{metricd_workers}}"
+ - section: metricd
+ option: metric_processing_delay
+ value: "{{metric_processing_delay}}"
+ - section: metricd
+ option: processing_replicas
+ value: "{{processing_replicas}}"
+
+ - name: Restart openstack-gnocchi-metricd
+ become: true
+ command: systemctl restart openstack-gnocchi-metricd
diff --git a/ansible/browbeat/adjustment-haproxy.yml b/ansible/browbeat/adjustment-haproxy.yml
new file mode 100644
index 000000000..3f96ad12d
--- /dev/null
+++ b/ansible/browbeat/adjustment-haproxy.yml
@@ -0,0 +1,31 @@
+---
+#
+# Playbook to bump the number of max "defaults" (vs global) connections through haproxy
+#
+# Examples:
+#
+# ansible-playbook -i hosts browbeat/adjustment-haproxy.yml -e 'old_maxconn=4096 new_maxconn=8192'
+#
+#
+
+
+- hosts: controller
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ vars:
+ old_maxconn: 4096
+ new_maxconn: 8192
+ tasks:
+ - name: Adjusting haproxy maxconn
+ become: true
+ replace:
+ dest: /etc/haproxy/haproxy.cfg
+ regexp: " maxconn {{old_maxconn}}"
+ replace: " maxconn {{new_maxconn}}"
+ backup: true
+
+ - name: Reload haproxy
+ become: true
+ command: systemctl reload haproxy
diff --git a/ansible/browbeat/adjustment-nova-allocation.yml b/ansible/browbeat/adjustment-nova-allocation.yml
index 104a8a872..46ffb76d3 100644
--- a/ansible/browbeat/adjustment-nova-allocation.yml
+++ b/ansible/browbeat/adjustment-nova-allocation.yml
@@ -1,12 +1,17 @@
---
#
-# Playbook to change Nova allocation ratios
+# Playbook to adjust Nova allocation ratios
#
# Examples:
# ansible-playbook -i hosts browbeat/adjustment-nova-allocation.yml -e "cpu_allocation_ratio=24"
# ansible-playbook -i hosts browbeat/adjustment-nova-allocation.yml -e "cpu_allocation_ratio=24 ram_allocation_ratio=10.0"
# ansible-playbook -i hosts browbeat/adjustment-nova-allocation.yml -e "cpu_allocation_ratio=24 ram_allocation_ratio=10.0 disk_allocation_ratio=10.0"
#
+# In order for new settings to take effect, you need to restart the Nova services
+# by adding variable restart_nova=true into the extra vars.
+#
+# ansible-playbook -i hosts browbeat/adjustment-nova-allocation.yml -e "cpu_allocation_ratio=24 ram_allocation_ratio=10.0 disk_allocation_ratio=10.0 restart_nova=true"
+#
# * Note not setting a variable does not change that configuration item then. Setting no variables
# and running the playbook sets all configuration items to defaults (cpu/ram/disk - 16/1/1)
#
@@ -14,9 +19,12 @@
#
- hosts: controller
- remote_user: heat-admin
+ remote_user: "{{ host_remote_user }}"
gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
vars:
+ restart_nova: false
# Create initial blank configuration list
nova_configuration: []
# Defaults
diff --git a/ansible/browbeat/adjustment-nova-scheduler.yml b/ansible/browbeat/adjustment-nova-scheduler.yml
new file mode 100644
index 000000000..98647bf2d
--- /dev/null
+++ b/ansible/browbeat/adjustment-nova-scheduler.yml
@@ -0,0 +1,98 @@
+---
+#
+# Playbook to adjust Nova Scheduler settings to avoid over-scheduling hosts
+# with greater memory in uneven memory environments.
+#
+# Examples:
+# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml -e 'max_instances_per_host=350'
+# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml -e 'max_instances_per_host=350 ram_weight_multiplier=0'
+# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml -e 'max_instances_per_host=350 ram_weight_multiplier=0 enabled_filters="NumInstancesFilter,RetryFilter,RamFilter,ComputeFilter"'
+# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml -e 'max_instances_per_host=350 ram_weight_multiplier=0 enabled_filters="NumInstancesFilter,RetryFilter,RamFilter,ComputeFilter" host_subset_size=4'
+#
+# In order for new settings to take effect, you need to restart the Nova services
+# by adding variable restart_nova=true into the extra vars.
+#
+# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml -e 'max_instances_per_host=350 ram_weight_multiplier=0 enabled_filters="NumInstancesFilter,RetryFilter,RamFilter,ComputeFilter" host_subset_size=4 restart_nova=true'
+#
+# * Note that not setting a variable leaves that configuration item unchanged. Setting no
+# variables and running the playbook sets all configuration items to their defaults (Ocata)
+#
+# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml
+#
+
+- hosts: controller
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ vars:
+ restart_nova: false
+ # Create initial blank configuration list
+ nova_configuration: []
+ # Defaults
+ default_max_instances_per_host: 50
+ default_ram_weight_multiplier: 1.0
+ default_enabled_filters: "RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter"
+ default_host_subset_size: 1
+ # Each configuration item needs to be a list so it can be merged
+ max_instances_per_host_item: []
+ ram_weight_multiplier_item: []
+ enabled_filters_item: []
+ host_subset_size_item: []
+
+ pre_tasks:
+ - name: Set default max_instances_per_host, ram_weight_multiplier, enabled_filters, and host_subset_size
+ set_fact:
+ nova_configuration:
+ - section: filter_scheduler
+ option: max_instances_per_host
+ value: "{{default_max_instances_per_host}}"
+ - section: filter_scheduler
+ option: ram_weight_multiplier
+ value: "{{default_ram_weight_multiplier}}"
+ - section: filter_scheduler
+ option: enabled_filters
+ value: "{{default_enabled_filters}}"
+ - section: filter_scheduler
+ option: host_subset_size
+ value: "{{default_host_subset_size}}"
+ when: max_instances_per_host is undefined and ram_weight_multiplier is undefined and enabled_filters is undefined and host_subset_size is undefined
+
+ - name: Set max_instances_per_host configuration for Nova
+ set_fact:
+ max_instances_per_host_item:
+ - section: filter_scheduler
+ option: max_instances_per_host
+ value: "{{max_instances_per_host}}"
+ when: max_instances_per_host is defined
+
+ - name: Set ram_weight_multiplier configuration for Nova
+ set_fact:
+ ram_weight_multiplier_item:
+ - section: filter_scheduler
+ option: ram_weight_multiplier
+ value: "{{ram_weight_multiplier}}"
+ when: ram_weight_multiplier is defined
+
+ - name: Set enabled_filters configuration for Nova
+ set_fact:
+ enabled_filters_item:
+ - section: filter_scheduler
+ option: enabled_filters
+ value: "{{enabled_filters}}"
+ when: enabled_filters is defined
+
+ - name: Set host_subset_size configuration for Nova
+ set_fact:
+ host_subset_size_item:
+ - section: filter_scheduler
+ option: host_subset_size
+ value: "{{host_subset_size}}"
+ when: host_subset_size is defined
+
+ - name: Merge configuration items
+ set_fact:
+ nova_configuration: "{{nova_configuration + max_instances_per_host_item + ram_weight_multiplier_item + enabled_filters_item + host_subset_size_item }}"
+
+ roles:
+ - nova-config
diff --git a/ansible/browbeat/install-at.yml b/ansible/browbeat/install-at.yml
new file mode 100644
index 000000000..af3b3f86f
--- /dev/null
+++ b/ansible/browbeat/install-at.yml
@@ -0,0 +1,28 @@
+---
+#
+# Playbook to install and enable atd
+#
+# This allows you to synchronize a script/command across multiple machines.
+# Example: Synchronized restarting of ceilometer polling across computes
+# and controllers.
+#
+
+- hosts: overcloud
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ roles:
+ - repo
+ tasks:
+ - name: Install at
+ yum:
+ name: at
+ become: true
+
+ - name: Start atd
+ service:
+ name: atd
+ enabled: true
+ state: restarted
+ become: true
diff --git a/ansible/browbeat/ntp-sync.yml b/ansible/browbeat/ntp-sync.yml
new file mode 100644
index 000000000..c401fdcf1
--- /dev/null
+++ b/ansible/browbeat/ntp-sync.yml
@@ -0,0 +1,17 @@
+---
+#
+# Playbook to force ntp time sync
+#
+# Example:
+#
+# ansible-playbook -i hosts browbeat/ntp-sync.yml -e 'ntp_server=clock.walkabout.com'
+#
+
+- hosts: overcloud
+ remote_user: "{{ host_remote_user }}"
+ vars_files:
+ - ../install/group_vars/all.yml
+ tasks:
+ - name: Sync NTP Time
+ command: ntpdate -u {{ntp_server}}
+ become: true
diff --git a/ansible/browbeat/roles/apache-config/tasks/main.yml b/ansible/browbeat/roles/apache-config/tasks/main.yml
new file mode 100644
index 000000000..85cb42a94
--- /dev/null
+++ b/ansible/browbeat/roles/apache-config/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+#
+# Tasks to deploy new prefork.conf settings for httpd
+#
+
+- name: Push new prefork.conf
+ become: true
+ template:
+ src: prefork.conf.j2
+ dest: /etc/httpd/conf.modules.d/prefork.conf
+ mode: 0644
+ owner: root
+ group: root
+ backup: true
+
+- name: Restart httpd
+ become: true
+ command: systemctl restart httpd
diff --git a/ansible/browbeat/roles/apache-config/templates/prefork.conf.j2 b/ansible/browbeat/roles/apache-config/templates/prefork.conf.j2
new file mode 100644
index 000000000..a04c530c0
--- /dev/null
+++ b/ansible/browbeat/roles/apache-config/templates/prefork.conf.j2
@@ -0,0 +1,17 @@
+# Deployed by Browbeat
+<IfModule mpm_prefork_module>
+  StartServers {{httpd_startservers}}
+  MinSpareServers {{httpd_minspareservers}}
+  MaxSpareServers {{httpd_maxspareservers}}
+  ServerLimit {{httpd_serverlimit}}
+  MaxClients {{httpd_maxclients}}
+  MaxRequestsPerChild {{httpd_maxrequestsperchild}}
+</IfModule>
+
+# Defaults:
+# httpd_startservers: 8
+# httpd_minspareservers: 5
+# httpd_maxspareservers: 20
+# httpd_serverlimit: 256
+# httpd_maxclients: 256
+# httpd_maxrequestsperchild: 4000
diff --git a/ansible/browbeat/roles/ceilometer-backend/handlers/main.yml b/ansible/browbeat/roles/ceilometer-backend/handlers/main.yml
deleted file mode 100644
index 6c1964d45..000000000
--- a/ansible/browbeat/roles/ceilometer-backend/handlers/main.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-#
-# Ceilometer change backend handlers
-#
-
-- name: pacemaker default unmanaged
- become: true
- command: pcs property set is-managed-default=false
- when: pacemaker_controlled
-
-- name: restart ceilometer services
- become: true
- service: name={{item}} state=restarted
- with_items:
- - openstack-ceilometer-api
- - openstack-ceilometer-central
- - openstack-ceilometer-collector
- - openstack-ceilometer-notification
- when: pacemaker_controlled
-
-- name: restart gnocchi services
- become: true
- service: name={{item}} state=restarted
- with_items:
- - openstack-gnocchi-metricd
- - openstack-gnocchi-statsd
-
-- name: pacemaker default managed
- become: true
- command: pcs property set is-managed-default=true
- when: pacemaker_controlled
-
-- name: pacemaker cleanup ceilometer
- become: true
- command: pcs resource cleanup {{item}}
- with_items:
- - openstack-ceilometer-api
- - openstack-ceilometer-central
- - openstack-ceilometer-collector
- - openstack-ceilometer-notification
- when: inventory_hostname == groups['controller'][0] and pacemaker_controlled
-
-- name: pacemaker cleanup gnocchi
- become: true
- command: pcs resource cleanup {{item}}
- with_items:
- - openstack-gnocchi-metricd
- - openstack-gnocchi-statsd
- when: inventory_hostname == groups['controller'][0] and pacemaker_controlled
diff --git a/ansible/browbeat/roles/ceilometer-backend/tasks/main.yml b/ansible/browbeat/roles/ceilometer-backend/tasks/main.yml
deleted file mode 100644
index 263855eb6..000000000
--- a/ansible/browbeat/roles/ceilometer-backend/tasks/main.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-#
-# Ceilometer tasks for browbeat
-# * Change backend between database and gnocchi
-#
-
-- name: Get current backend(s)
- become: true
- command: crudini --get /etc/ceilometer/ceilometer.conf DEFAULT meter_dispatchers
- register: current_backend
-
-- debug: msg="Current Backend={{current_backend.stdout}}"
-
-- name: Configure Ceilometer Backend
- become: true
- ini_file:
- dest: /etc/ceilometer/ceilometer.conf
- mode: 0640
- section: DEFAULT
- option: meter_dispatchers
- value: "{{ ceilometer_backend }}"
- backup: yes
- notify:
- - pacemaker default unmanaged
- - restart ceilometer services
- - restart gnocchi services
- - pacemaker default managed
- - pacemaker cleanup ceilometer
- - pacemaker cleanup gnocchi
-
-- name: Configure for gnocchi
- become: true
- when: "('{{ceilometer_backend}}' == 'gnocchi') and (inventory_hostname == groups['controller'][0])"
- shell: gnocchi-upgrade --create-legacy-resource-types
diff --git a/ansible/browbeat/roles/ceilometer-config/tasks/main.yml b/ansible/browbeat/roles/ceilometer-config/tasks/main.yml
new file mode 100644
index 000000000..6324c32f3
--- /dev/null
+++ b/ansible/browbeat/roles/ceilometer-config/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+#
+# Configure ceilometer.conf tasks
+#
+
+- name: Configure ceilometer.conf
+ become: true
+ ini_file:
+ dest: /etc/ceilometer/ceilometer.conf
+ mode: 0640
+ group: ceilometer
+ section: "{{ item.section }}"
+ option: "{{ item.option }}"
+ value: "{{ item.value }}"
+ backup: yes
+ with_items:
+ - "{{ceilometer_configuration}}"
+
+- name: Restart Ceilometer Agent Notification
+ become: true
+ command: systemctl restart openstack-ceilometer-notification
+ when: restart_notification
+
+- name: Restart Ceilometer Collector
+ become: true
+ command: systemctl restart openstack-ceilometer-collector
+ when: restart_collector
diff --git a/ansible/browbeat/roles/ceilometer-interval/handlers/main.yml b/ansible/browbeat/roles/ceilometer-interval/handlers/main.yml
deleted file mode 100644
index 932429fe6..000000000
--- a/ansible/browbeat/roles/ceilometer-interval/handlers/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-#
-# Ceilometer Interval handlers
-#
-
-- name: pacemaker unmanage openstack-ceilometer-central
- become: true
- command: pcs resource unmanage openstack-ceilometer-central
- when: inventory_hostname in groups['controller']
-
-- name: restart openstack-ceilometer-central
- become: true
- service: name=openstack-ceilometer-central state=restarted
- when: inventory_hostname in groups['controller']
-
-- name: pacemaker manage openstack-ceilometer-central
- become: true
- command: pcs resource manage openstack-ceilometer-central
- when: inventory_hostname in groups['controller']
-
-- name: restart openstack-ceilometer-compute
- become: true
- service: name=openstack-ceilometer-compute state=restarted
- when: inventory_hostname in groups['compute']
diff --git a/ansible/browbeat/roles/ceilometer-interval/tasks/main.yml b/ansible/browbeat/roles/ceilometer-interval/tasks/main.yml
deleted file mode 100644
index c13296a59..000000000
--- a/ansible/browbeat/roles/ceilometer-interval/tasks/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-#
-# Ceilometer interval tasks for Browbeat
-#
-
-- name: Deploy pipeline.yaml files
- become: true
- template:
- src: pipeline.yaml.j2
- dest: /etc/ceilometer/pipeline.yaml
- owner: root
- group: ceilometer
- mode: 0640
- backup: true
- notify:
- - pacemaker unmanage openstack-ceilometer-central
- - restart openstack-ceilometer-central
- - pacemaker manage openstack-ceilometer-central
- - restart openstack-ceilometer-compute
diff --git a/ansible/browbeat/roles/ceilometer-interval/templates/pipeline.yaml.j2 b/ansible/browbeat/roles/ceilometer-interval/templates/pipeline.yaml.j2
deleted file mode 100644
index c4e53de10..000000000
--- a/ansible/browbeat/roles/ceilometer-interval/templates/pipeline.yaml.j2
+++ /dev/null
@@ -1,92 +0,0 @@
----
-sources:
- - name: meter_source
- interval: {{ceilometer_interval}}
- meters:
- - "*"
- sinks:
- - meter_sink
- - name: cpu_source
- interval: {{ceilometer_interval}}
- meters:
- - "cpu"
- sinks:
- - cpu_sink
- - cpu_delta_sink
- - name: disk_source
- interval: {{ceilometer_interval}}
- meters:
- - "disk.read.bytes"
- - "disk.read.requests"
- - "disk.write.bytes"
- - "disk.write.requests"
- - "disk.device.read.bytes"
- - "disk.device.read.requests"
- - "disk.device.write.bytes"
- - "disk.device.write.requests"
- sinks:
- - disk_sink
- - name: network_source
- interval: {{ceilometer_interval}}
- meters:
- - "network.incoming.bytes"
- - "network.incoming.packets"
- - "network.outgoing.bytes"
- - "network.outgoing.packets"
- sinks:
- - network_sink
-sinks:
- - name: meter_sink
- transformers:
- publishers:
- - notifier://
- - name: cpu_sink
- transformers:
- - name: "rate_of_change"
- parameters:
- target:
- name: "cpu_util"
- unit: "%"
- type: "gauge"
- scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
- publishers:
- - notifier://
- - name: cpu_delta_sink
- transformers:
- - name: "delta"
- parameters:
- target:
- name: "cpu.delta"
- growth_only: True
- publishers:
- - notifier://
- - name: disk_sink
- transformers:
- - name: "rate_of_change"
- parameters:
- source:
- map_from:
- name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
- unit: "(B|request)"
- target:
- map_to:
- name: "\\1.\\2.\\3.rate"
- unit: "\\1/s"
- type: "gauge"
- publishers:
- - notifier://
- - name: network_sink
- transformers:
- - name: "rate_of_change"
- parameters:
- source:
- map_from:
- name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
- unit: "(B|packet)"
- target:
- map_to:
- name: "network.\\1.\\2.rate"
- unit: "\\1/s"
- type: "gauge"
- publishers:
- - notifier://
diff --git a/ansible/browbeat/roles/ceilometer-polling/tasks/main.yml b/ansible/browbeat/roles/ceilometer-polling/tasks/main.yml
new file mode 100644
index 000000000..bb04335c9
--- /dev/null
+++ b/ansible/browbeat/roles/ceilometer-polling/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+#
+# Deploy the Ceilometer polling.yaml file
+#
+
+- name: Deploy polling.yaml file
+ become: true
+ template:
+ src: polling.yaml.j2
+ dest: /etc/ceilometer/polling.yaml
+ owner: root
+ group: ceilometer
+ mode: 0640
+ backup: true
+ when: "{{reduced_metrics}} == false"
+
+- name: Deploy the reduced metrics polling.yaml file
+ become: true
+ template:
+ src: reduced_polling.yaml.j2
+ dest: /etc/ceilometer/polling.yaml
+ owner: root
+ group: ceilometer
+ mode: 0640
+ backup: true
+ when: reduced_metrics
diff --git a/ansible/browbeat/roles/ceilometer-polling/templates/polling.yaml.j2 b/ansible/browbeat/roles/ceilometer-polling/templates/polling.yaml.j2
new file mode 100644
index 000000000..73e9d1e00
--- /dev/null
+++ b/ansible/browbeat/roles/ceilometer-polling/templates/polling.yaml.j2
@@ -0,0 +1,6 @@
+---
+sources:
+ - name: some_pollsters
+ interval: {{polling_interval}}
+ meters:
+ - "*"
diff --git a/ansible/browbeat/roles/ceilometer-polling/templates/reduced_polling.yaml.j2 b/ansible/browbeat/roles/ceilometer-polling/templates/reduced_polling.yaml.j2
new file mode 100644
index 000000000..eef338c46
--- /dev/null
+++ b/ansible/browbeat/roles/ceilometer-polling/templates/reduced_polling.yaml.j2
@@ -0,0 +1,26 @@
+---
+sources:
+ - name: some_pollsters
+ interval: {{polling_interval}}
+ meters:
+ - cpu
+ - memory.usage
+ - network.incoming.bytes
+ - network.incoming.packets
+ - network.outgoing.bytes
+ - network.outgoing.packets
+ - disk.read.bytes
+ - disk.read.requests
+ - disk.write.bytes
+ - disk.write.requests
+ - hardware.cpu.util
+ - hardware.memory.used
+ - hardware.memory.total
+ - hardware.memory.buffer
+ - hardware.memory.cached
+ - hardware.memory.swap.avail
+ - hardware.memory.swap.total
+ - hardware.system_stats.io.outgoing.blocks
+ - hardware.system_stats.io.incoming.blocks
+ - hardware.network.ip.incoming.datagrams
+ - hardware.network.ip.outgoing.datagrams
diff --git a/ansible/browbeat/roles/gnocchi-api-config/tasks/main.yml b/ansible/browbeat/roles/gnocchi-api-config/tasks/main.yml
new file mode 100644
index 000000000..7f3909e1b
--- /dev/null
+++ b/ansible/browbeat/roles/gnocchi-api-config/tasks/main.yml
@@ -0,0 +1,23 @@
+---
+#
+# Tasks to reconfigure Gnocchi API wsgi service
+#
+
+- name: Get internal API address
+ become: true
+ shell: "grep {{inventory_hostname}}.internalapi.localdomain /etc/hosts | awk '{print $1}'"
+ register: internal_api_ip
+
+- name: Push new 10-gnocchi_wsgi.conf
+ become: true
+ template:
+ src: gnocchi_wsgi.conf.j2
+ dest: /etc/httpd/conf.d/10-gnocchi_wsgi.conf
+ mode: 0640
+ owner: root
+ group: root
+ backup: true
+
+- name: Restart Gnocchi API (httpd)
+ become: true
+ command: systemctl restart httpd
diff --git a/ansible/browbeat/roles/gnocchi-api-config/templates/gnocchi_wsgi.conf.j2 b/ansible/browbeat/roles/gnocchi-api-config/templates/gnocchi_wsgi.conf.j2
new file mode 100644
index 000000000..016a8d04b
--- /dev/null
+++ b/ansible/browbeat/roles/gnocchi-api-config/templates/gnocchi_wsgi.conf.j2
@@ -0,0 +1,26 @@
+# Browbeat Deployed Gnocchi API wsgi config
+
+<VirtualHost {{internal_api_ip.stdout}}:8041>
+  ServerName {{inventory_hostname}}.internalapi.localdomain
+
+  ## Vhost docroot
+  DocumentRoot "/var/www/cgi-bin/gnocchi"
+
+  ## Directories, there should at least be a declaration for /var/www/cgi-bin/gnocchi
+
+  <Directory "/var/www/cgi-bin/gnocchi">
+    Options Indexes FollowSymLinks MultiViews
+    AllowOverride None
+    Require all granted
+  </Directory>
+
+  ## Logging
+  ErrorLog "/var/log/httpd/gnocchi_wsgi_error.log"
+  ServerSignature Off
+  CustomLog "/var/log/httpd/gnocchi_wsgi_access.log" combined
+  SetEnvIf X-Forwarded-Proto https HTTPS=1
+  WSGIApplicationGroup %{GLOBAL}
+  WSGIDaemonProcess gnocchi display-name=gnocchi_wsgi group=gnocchi processes={{gnocchi_api_processes}} threads={{gnocchi_api_threads}} user=gnocchi
+  WSGIProcessGroup gnocchi
+  WSGIScriptAlias / "/var/www/cgi-bin/gnocchi/app"
+</VirtualHost>
diff --git a/ansible/browbeat/roles/nova-config/handlers/main.yml b/ansible/browbeat/roles/nova-config/handlers/main.yml
deleted file mode 100644
index 8cba11f7f..000000000
--- a/ansible/browbeat/roles/nova-config/handlers/main.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-#
-# Nova handlers for browbeat adjustment
-#
-
-- name: unmanage nova services
- become: true
- command: pcs resource unmanage {{ item }}
- with_items:
- - openstack-nova-api
- - openstack-nova-scheduler
- - openstack-nova-conductor
- ignore_errors: true
- when: pacemaker_controlled
-
-- name: restart nova services
- become: true
- service: name={{ item }} state=restarted
- with_items:
- - openstack-nova-api
- - openstack-nova-scheduler
- - openstack-nova-conductor
-
-- name: manage nova services
- become: true
- command: pcs resource manage {{ item }}
- with_items:
- - openstack-nova-api
- - openstack-nova-scheduler
- - openstack-nova-conductor
- ignore_errors: true
- when: pacemaker_controlled
-
-- name: cleanup nova services
- become: true
- command: pcs resource cleanup {{ item }}
- with_items:
- - openstack-nova-api
- - openstack-nova-scheduler
- - openstack-nova-conductor
- ignore_errors: true
- when: pacemaker_controlled
diff --git a/ansible/browbeat/roles/nova-config/tasks/main.yml b/ansible/browbeat/roles/nova-config/tasks/main.yml
index d9a2d3ba4..b6b64c3ff 100644
--- a/ansible/browbeat/roles/nova-config/tasks/main.yml
+++ b/ansible/browbeat/roles/nova-config/tasks/main.yml
@@ -8,14 +8,24 @@
ini_file:
dest: /etc/nova/nova.conf
mode: 0640
+ group: nova
section: "{{ item.section }}"
option: "{{ item.option }}"
value: "{{ item.value }}"
backup: yes
with_items:
- "{{nova_configuration}}"
- notify:
- - unmanage nova services
- - restart nova services
- - manage nova services
- - cleanup nova services
+
+- name: Restart Nova Services
+ become: true
+ service:
+ name: "{{ item }}"
+ state: restarted
+ with_items:
+ - openstack-nova-scheduler
+ - openstack-nova-api
+ - openstack-nova-conductor
+ - openstack-nova-novncproxy
+ - openstack-nova-consoleauth
+ - httpd
+ when: restart_nova
diff --git a/ansible/browbeat/roles/run-task-at/tasks/main.yml b/ansible/browbeat/roles/run-task-at/tasks/main.yml
new file mode 100644
index 000000000..9c12f481b
--- /dev/null
+++ b/ansible/browbeat/roles/run-task-at/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+#
+# Tasks to kick a task off at a specific time using at daemon
+#
+
+- name: Create job file
+ become: true
+ shell: "echo '#!/bin/bash\n {{the_task}} '>/root/browbeat-sync.sh"
+
+- name: Set execute on file
+ become: true
+ file:
+ path: /root/browbeat-sync.sh
+ owner: root
+ group: root
+ mode: 0744
+
+- name: Create at job
+ become: true
+ command: "at -f /root/browbeat-sync.sh {{task_time}}"
diff --git a/ansible/browbeat/scale-virtlogd.yml b/ansible/browbeat/scale-virtlogd.yml
new file mode 100644
index 000000000..88d7aa7b8
--- /dev/null
+++ b/ansible/browbeat/scale-virtlogd.yml
@@ -0,0 +1,47 @@
+---
+#
+# Playbook to prevent virtlogd from running out of files when scaling instances
+# on a compute node.
+#
+# With OS defaults, virtlogd will prevent more than 252 instances per compute
+# due to a maximum number of files it can open.
+#
+# Example:
+#
+# ansible-playbook -i hosts browbeat/scale-virtlogd.yml -e 'max_open_files=10000'
+#
+
+- hosts: compute
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ vars:
+ max_open_files: 10000
+ tasks:
+ # Virtlogd
+ - name: Replace max open files setting for virtlogd
+ become: true
+ replace:
+ dest: "/usr/lib/systemd/system/virtlogd.service"
+ regexp: "LimitNOFILE=[0-9]+"
+ replace: "LimitNOFILE={{max_open_files}}"
+ backup: true
+ ignore_errors: true
+ register: replace_output
+
+ - name: Override max open files for virtlogd
+ become: true
+ lineinfile:
+ dest: "/usr/lib/systemd/system/virtlogd.service"
+ line: "LimitNOFILE={{max_open_files}}"
+ insertafter: "OOMScoreAdjust=-900"
+ when: replace_output.changed != true
+
+ - name: Issue daemon-reload
+ become: true
+ command: systemctl daemon-reload
+
+ - name: Restart virtlogd
+ become: true
+ command: systemctl restart virtlogd
diff --git a/ansible/browbeat/sync-ceilometer-polling.yml b/ansible/browbeat/sync-ceilometer-polling.yml
new file mode 100644
index 000000000..ebc4158cc
--- /dev/null
+++ b/ansible/browbeat/sync-ceilometer-polling.yml
@@ -0,0 +1,44 @@
+---
+#
+# Playbook to sync ceilometer polling across the controller and compute nodes
+#
+# Example:
+#
+# ansible-playbook -i hosts browbeat/sync-ceilometer-polling.yml -e 'task_time=18:25'
+#
+
+# Pike
+- hosts: controller,compute
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ pre_tasks:
+ - name: Get OSP Version
+ slurp:
+ src: "/etc/rhosp-release"
+ register: osp_version
+ become: true
+
+ - name: (Ocata) Set Controller Task
+ set_fact:
+ the_task: "systemctl restart openstack-ceilometer-central.service"
+ when: "('Ocata' in osp_version['content'] | b64decode) and ('controller' in group_names)"
+
+ - name: (Ocata) Set Compute Task
+ set_fact:
+ the_task: "systemctl restart openstack-ceilometer-compute.service"
+ when: "('Ocata' in osp_version['content'] | b64decode) and ('compute' in group_names)"
+
+ - name: (Pike) Set Controller Task
+ set_fact:
+ the_task: "systemctl restart openstack-ceilometer-polling.service"
+ when: "('Pike' in osp_version['content'] | b64decode) and ('controller' in group_names)"
+
+ - name: (Pike) Set Compute Task
+ set_fact:
+ the_task: "systemctl restart openstack-ceilometer-polling.service"
+ when: "('Pike' in osp_version['content'] | b64decode) and ('compute' in group_names)"
+
+ roles:
+ - run-task-at
diff --git a/ansible/browbeat/telemetry-disable-polling.yml b/ansible/browbeat/telemetry-disable-polling.yml
new file mode 100644
index 000000000..0be2cdd75
--- /dev/null
+++ b/ansible/browbeat/telemetry-disable-polling.yml
@@ -0,0 +1,44 @@
+---
+#
+# Playbook to quickly disable polling across all overcloud nodes.
+#
+# Cuts off the flow of measures into the backlog.
+#
+
+- hosts: controller, compute
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ tasks:
+ - name: Get OSP Version
+ slurp:
+ src: "/etc/rhosp-release"
+ register: osp_version
+ become: true
+
+ - name: (Ocata) Set Controller Polling Daemon
+ set_fact:
+ polling_daemon: "openstack-ceilometer-central.service"
+ when: "('Ocata' in osp_version['content'] | b64decode) and ('controller' in group_names)"
+
+ - name: (Ocata) Set Compute Polling Daemon
+ set_fact:
+ polling_daemon: "openstack-ceilometer-compute.service"
+ when: "('Ocata' in osp_version['content'] | b64decode) and ('compute' in group_names)"
+
+ - name: (Pike) Set Controller Polling Daemon
+ set_fact:
+ polling_daemon: "openstack-ceilometer-polling.service"
+ when: "('Pike' in osp_version['content'] | b64decode) and ('controller' in group_names)"
+
+ - name: (Pike) Set Compute Polling Daemon
+ set_fact:
+ polling_daemon: "openstack-ceilometer-polling.service"
+ when: "('Pike' in osp_version['content'] | b64decode) and ('compute' in group_names)"
+
+ - name: Stopping Ceilometer Polling Daemon
+ become: true
+ command: "systemctl stop {{item}}"
+ with_items:
+ - "{{polling_daemon}}"
diff --git a/ansible/browbeat/telemetry-disable.yml b/ansible/browbeat/telemetry-disable.yml
new file mode 100644
index 000000000..bc37179d8
--- /dev/null
+++ b/ansible/browbeat/telemetry-disable.yml
@@ -0,0 +1,150 @@
+---
+#
+# Playbook to disable Telemetry Services
+#
+
+- hosts: controller, compute
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ tasks:
+ - name: Get OSP Version
+ slurp:
+ src: "/etc/rhosp-release"
+ register: osp_version
+ become: true
+
+ - name: (Ocata) Set Telemetry Controller Services
+ set_fact:
+ controller_services:
+ - openstack-aodh-evaluator.service
+ - openstack-aodh-listener.service
+ - openstack-aodh-notifier.service
+ - openstack-ceilometer-notification
+ - openstack-ceilometer-collector
+ - openstack-gnocchi-metricd.service
+ - openstack-gnocchi-statsd.service
+ - openstack-ceilometer-central.service
+ when: "'Ocata' in osp_version['content'] | b64decode"
+
+ - name: (Ocata) Set Compute Polling Daemon
+ set_fact:
+ compute_services:
+ - "openstack-ceilometer-compute.service"
+ when: "'Ocata' in osp_version['content'] | b64decode"
+
+ - name: (Pike) Set Telemetry Controller Services
+ set_fact:
+ controller_services:
+ - openstack-aodh-evaluator.service
+ - openstack-aodh-listener.service
+ - openstack-aodh-notifier.service
+ - openstack-ceilometer-notification
+ - openstack-gnocchi-metricd.service
+ - openstack-gnocchi-statsd.service
+ - openstack-ceilometer-polling.service
+ when: "'Pike' in osp_version['content'] | b64decode"
+
+ - name: (Pike) Set Compute Polling Daemon
+ set_fact:
+ compute_services:
+ - "openstack-ceilometer-polling.service"
+ when: "'Pike' in osp_version['content'] | b64decode"
+
+ - name: Copy HTTPD wsgi service config files in order to temporarily disable them
+ become: true
+ copy:
+ remote_src: true
+ src: "{{item.src}}"
+ dest: "{{item.dest}}"
+ with_items:
+ - src: /etc/httpd/conf.d/10-aodh_wsgi.conf
+ dest: /root/10-aodh_wsgi.conf
+ - src: /etc/httpd/conf.d/10-ceilometer_wsgi.conf
+ dest: /root/10-ceilometer_wsgi.conf
+ - src: /etc/httpd/conf.d/10-gnocchi_wsgi.conf
+ dest: /root/10-gnocchi_wsgi.conf
+ - src: /etc/httpd/conf.d/10-panko_wsgi.conf
+ dest: /root/10-panko_wsgi.conf
+ when: "'controller' in group_names"
+
+ - name: Delete HTTPD wsgi service config files in order to temporarily disable them
+ become: true
+ file:
+ path: "{{item}}"
+ state: absent
+ with_items:
+ - "/etc/httpd/conf.d/10-aodh_wsgi.conf"
+ - "/etc/httpd/conf.d/10-ceilometer_wsgi.conf"
+ - "/etc/httpd/conf.d/10-gnocchi_wsgi.conf"
+ - "/etc/httpd/conf.d/10-panko_wsgi.conf"
+ when: "'controller' in group_names"
+
+ - name: Stopping Telemetry Controller Services
+ become: true
+ command: "systemctl stop {{item}}"
+ with_items: "{{controller_services}}"
+ when: "'controller' in group_names"
+
+ - name: Stopping Telemetry Compute Services
+ become: true
+ command: "systemctl stop {{item}}"
+ with_items: "{{compute_services}}"
+ when: "'compute' in group_names"
+
+ - name: Setting Nova Notification Driver to noop
+ become: true
+ ini_file:
+ dest: "{{item.dest}}"
+        mode: "0640"
+ group: "{{item.group}}"
+ section: oslo_messaging_notifications
+ option: driver
+ value: noop
+        backup: true
+ with_items:
+ - dest: /etc/nova/nova.conf
+ group: nova
+
+ - name: Setting Notification Driver to noop
+ become: true
+ ini_file:
+ dest: "{{item.dest}}"
+        mode: "0640"
+ group: "{{item.group}}"
+ section: oslo_messaging_notifications
+ option: driver
+ value: noop
+        backup: true
+ with_items:
+ - dest: /etc/cinder/cinder.conf
+ group: cinder
+ - dest: /etc/glance/glance-api.conf
+ group: glance
+ - dest: /etc/heat/heat.conf
+ group: heat
+ - dest: /etc/keystone/keystone.conf
+ group: keystone
+ - dest: /etc/neutron/neutron.conf
+ group: neutron
+ when: "'controller' in group_names"
+
+ - name: Restart Controller Services to disable notifications
+ become: true
+ command: "systemctl restart {{item}}"
+ with_items:
+ - openstack-cinder-scheduler.service
+ - openstack-glance-api.service
+ - openstack-heat-engine.service
+ - neutron-server.service
+ - openstack-nova-api.service
+ - openstack-nova-conductor.service
+ - openstack-nova-scheduler.service
+ - httpd
+ when: "'controller' in group_names"
+
+ - name: Restart Nova Compute Service to disable notifications
+ become: true
+ command: "systemctl restart openstack-nova-compute.service"
+ when: "'compute' in group_names"
diff --git a/ansible/browbeat/telemetry-enable.yml b/ansible/browbeat/telemetry-enable.yml
new file mode 100644
index 000000000..1e495be71
--- /dev/null
+++ b/ansible/browbeat/telemetry-enable.yml
@@ -0,0 +1,155 @@
+---
+#
+# Playbook to reverse disable Telemetry Services playbook
+#
+
+- hosts: controller, compute
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ tasks:
+ - name: Get OSP Version
+ slurp:
+ src: "/etc/rhosp-release"
+ register: osp_version
+ become: true
+
+ - name: (Ocata) Set Telemetry Controller Services
+ set_fact:
+ controller_services:
+ - openstack-aodh-evaluator.service
+ - openstack-aodh-listener.service
+ - openstack-aodh-notifier.service
+ - openstack-ceilometer-notification
+ - openstack-ceilometer-collector
+ - openstack-gnocchi-metricd.service
+ - openstack-gnocchi-statsd.service
+ - openstack-ceilometer-central.service
+ when: "'Ocata' in osp_version['content'] | b64decode"
+
+ - name: (Ocata) Set Compute Polling Daemon
+ set_fact:
+ compute_services:
+ - "openstack-ceilometer-compute.service"
+ when: "'Ocata' in osp_version['content'] | b64decode"
+
+ - name: (Pike) Set Telemetry Controller Services
+ set_fact:
+ controller_services:
+ - openstack-aodh-evaluator.service
+ - openstack-aodh-listener.service
+ - openstack-aodh-notifier.service
+ - openstack-ceilometer-notification
+ - openstack-gnocchi-metricd.service
+ - openstack-gnocchi-statsd.service
+ - openstack-ceilometer-polling.service
+ when: "'Pike' in osp_version['content'] | b64decode"
+
+ - name: (Pike) Set Compute Polling Daemon
+ set_fact:
+ compute_services:
+ - "openstack-ceilometer-polling.service"
+ when: "'Pike' in osp_version['content'] | b64decode"
+
+ - name: Copy HTTPD wsgi service config files back to /etc/httpd/conf.d
+ become: true
+ copy:
+ remote_src: true
+ src: "{{item.src}}"
+ dest: "{{item.dest}}"
+ with_items:
+ - src: /root/10-aodh_wsgi.conf
+ dest: /etc/httpd/conf.d/10-aodh_wsgi.conf
+ - src: /root/10-ceilometer_wsgi.conf
+ dest: /etc/httpd/conf.d/10-ceilometer_wsgi.conf
+ - src: /root/10-gnocchi_wsgi.conf
+ dest: /etc/httpd/conf.d/10-gnocchi_wsgi.conf
+ - src: /root/10-panko_wsgi.conf
+ dest: /etc/httpd/conf.d/10-panko_wsgi.conf
+ when: "'controller' in group_names"
+
+ - name: Clean Up HTTPD wsgi service config files in /root
+ become: true
+ file:
+ path: "{{item}}"
+ state: absent
+ with_items:
+ - "/root/10-aodh_wsgi.conf"
+ - "/root/10-ceilometer_wsgi.conf"
+ - "/root/10-gnocchi_wsgi.conf"
+ - "/root/10-panko_wsgi.conf"
+ when: "'controller' in group_names"
+
+ - name: Starting Telemetry Controller Services
+ become: true
+ command: "systemctl start {{item}}"
+ with_items: "{{controller_services}}"
+ when: "'controller' in group_names"
+
+ - name: Starting Telemetry Compute Service(s)
+ become: true
+ command: "systemctl start {{item}}"
+ with_items: "{{compute_services}}"
+ when: "'compute' in group_names"
+
+ - name: Setting Nova Notification Driver to messagingv2
+ become: true
+ ini_file:
+ dest: "{{item.dest}}"
+        mode: "0640"
+ group: "{{item.group}}"
+ section: oslo_messaging_notifications
+ option: driver
+ value: messagingv2
+        backup: true
+ with_items:
+ - dest: /etc/nova/nova.conf
+ group: nova
+
+ - name: Setting Notification Driver to messagingv2/messaging
+ become: true
+ ini_file:
+ dest: "{{item.dest}}"
+        mode: "0640"
+ group: "{{item.group}}"
+ section: oslo_messaging_notifications
+ option: driver
+ value: "{{item.value}}"
+        backup: true
+ with_items:
+ - dest: /etc/cinder/cinder.conf
+ group: cinder
+ value: messagingv2
+ - dest: /etc/glance/glance-api.conf
+ group: glance
+ value: messagingv2
+ - dest: /etc/heat/heat.conf
+ group: heat
+ value: messaging
+ - dest: /etc/keystone/keystone.conf
+ group: keystone
+ value: messaging
+ - dest: /etc/neutron/neutron.conf
+ group: neutron
+          value: messagingv2
+ when: "'controller' in group_names"
+
+ - name: Restart Controller Services to enable notifications
+ become: true
+ command: "systemctl restart {{item}}"
+ with_items:
+ - openstack-cinder-scheduler.service
+ - openstack-glance-api.service
+ - openstack-heat-engine.service
+ - neutron-server.service
+ - openstack-nova-api.service
+ - openstack-nova-conductor.service
+ - openstack-nova-scheduler.service
+ - httpd
+ when: "'controller' in group_names"
+
+ - name: Restart Nova Compute Services to enable notifications
+ become: true
+ command: "systemctl restart openstack-nova-compute.service"
+ when: "'compute' in group_names"
diff --git a/ansible/browbeat/telemetry-polling.yml b/ansible/browbeat/telemetry-polling.yml
new file mode 100644
index 000000000..1c2069cd0
--- /dev/null
+++ b/ansible/browbeat/telemetry-polling.yml
@@ -0,0 +1,25 @@
+---
+#
+# Playbook to change telemetry's polling yaml
+#
+# Deploy polling.yaml with specific interval:
+#
+# ansible-playbook -i hosts browbeat/telemetry-polling.yml -e 'polling_interval=600'
+#
+# Deploy with reduced metrics:
+#
+# ansible-playbook -i hosts browbeat/telemetry-polling.yml -e 'polling_interval=600 reduced_metrics=true'
+#
+# Remember to sync polling daemons afterwards (Restarting openstack-ceilometer-[central,compute])
+#
+
+- hosts: controller,compute
+ remote_user: "{{ host_remote_user }}"
+ gather_facts: false
+ vars_files:
+ - ../install/group_vars/all.yml
+ vars:
+ polling_interval: 300
+ reduced_metrics: false
+ roles:
+ - ceilometer-polling
diff --git a/ansible/install/roles/flavors/vars/main.yml b/ansible/install/roles/flavors/vars/main.yml
index bbc57d9fe..bfdb0f064 100644
--- a/ansible/install/roles/flavors/vars/main.yml
+++ b/ansible/install/roles/flavors/vars/main.yml
@@ -8,6 +8,10 @@ browbeat_flavors:
cpu: 1
memory: 64
disk: 1
+ - name: m1.tiny-centos
+ cpu: 1
+ memory: 192
+ disk: 8
- name: m1.tiny
cpu: 1
memory: 512