CI: Add codespell to pep8
Fix existing spelling errors

Change-Id: Ie689cf5a344aaa630a4860448b09242333a8e119
commit 448209459d
parent a65524d0ca
.codespell-ignore (new file, 2 lines)
@@ -0,0 +1,2 @@
+ist
+solum
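The two entries above are words that occur legitimately in the repository (for example, Solum is an OpenStack project name) and that codespell would otherwise flag as misspellings; listing them in .codespell-ignore suppresses those false positives. A minimal sketch of running the check by hand, assuming codespell is installed locally; it mirrors the tox command added to tox.ini at the end of this change:

# Hypothetical local run (illustrative, not part of this change): report
# misspellings across the working tree, skipping words from the ignore list.
codespell -I .codespell-ignore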
@@ -1407,7 +1407,7 @@ octavia_public_endpoint: "{{ octavia_external_fqdn | kolla_url(public_protocol,
 ###################################
 # Identity federation configuration
 ###################################
-# Here we configure all of the IdPs meta informations that will be required to implement identity federation with OpenStack Keystone.
+# Here we configure all of the IdPs meta information that will be required to implement identity federation with OpenStack Keystone.
 # We require the administrator to enter the following metadata:
 # * name (internal name of the IdP in Keystone);
 # * openstack_domain (the domain in Keystone that the IdP belongs to)

@@ -30,7 +30,7 @@ class ContainerWorker(ABC):

 self.systemd = SystemdWorker(self.params)

-# NOTE(mgoddard): The names used by Docker are inconsisent between
+# NOTE(mgoddard): The names used by Docker are inconsistent between
 # configuration of a container's resources and the resources in
 # container_info['HostConfig']. This provides a mapping between the
 # two.

@@ -24,7 +24,7 @@ CONTAINER_PARAMS = [
 'name', # string
 'cap_add', # list
 'cgroupns', # 'str',choices=['private', 'host']
-'command', # arrray of strings -- docker string
+'command', # array of strings -- docker string

 # this part is hidden inside dimensions
 'cpu_period', # int

@@ -56,7 +56,7 @@ CONTAINER_PARAMS = [
 'privileged', # bool
 'restart_policy', # set to none, handled by systemd
 'remove', # bool
-'restart_tries', # int doesnt matter done by systemd
+'restart_tries', # int doesn't matter done by systemd
 'stop_timeout', # int
 'tty' # bool
 # VOLUMES NOT WORKING HAS TO BE DONE WITH MOUNTS

@@ -390,7 +390,7 @@ class PodmanWorker(ContainerWorker):
 def compare_dimensions(self, container_info):
 new_dimensions = self.params.get('dimensions')

-# NOTE(mgoddard): The names used by Docker are inconsisent between
+# NOTE(mgoddard): The names used by Docker are inconsistent between
 # configuration of a container's resources and the resources in
 # container_info['HostConfig']. This provides a mapping between the
 # two.
@@ -30,9 +30,9 @@ common_services:
 volumes: "{{ cron_default_volumes + cron_extra_volumes }}"
 dimensions: "{{ cron_dimensions }}"

-#######################
-# TLS and authenication
-#######################
+########################
+# TLS and authentication
+########################

 fluentd_elasticsearch_path: ""
 fluentd_elasticsearch_scheme: "{{ internal_protocol }}"

@@ -5,7 +5,7 @@ extensions = qos

 [sriov_nic]
 # 'physical_device_mappings' is a comma separated list
-# Maps a physical network to network inferface used for SRIOV
+# Maps a physical network to network interface used for SRIOV
 # This template should be modified for specific environments
 # See Official OpenStack SRIOV documentation for all available options
 physical_device_mappings = {{ neutron_sriov_physnets }}
@@ -552,7 +552,7 @@ libvirt_tls: false
 # also means the deployer is responsible for restarting the nova_compute and
 # nova_libvirt containers when the key changes, as we can't know when to do that
 libvirt_tls_manage_certs: true
-# When using tls we are verfiying the hostname we are connected to matches the
+# When using tls we are verifying the hostname we are connected to matches the
 # libvirt cert we are presented. As such we can't use IP's here, but keep the
 # ability for people to override the hostname to use.
 migration_hostname: "{{ ansible_facts.nodename }}"

@@ -579,7 +579,7 @@ nova_dev_mode: "{{ kolla_dev_mode }}"
 nova_source_version: "{{ kolla_source_version }}"

 ###################################
-# Enable Shared Bind Propogation
+# Enable Shared Bind Propagation
 ###################################

 enable_shared_var_lib_nova_mnt: "{{ enable_cinder_backend_nfs | bool or enable_cinder_backend_quobyte | bool }}"
@@ -4,7 +4,7 @@
 name: loadbalancer-config
 vars:
 project_services: "{{ cell_proxy_project_services | namespace_haproxy_for_cell(cell_name) }}"
-# Default is necessary because this play may not be targetting the hosts in
+# Default is necessary because this play may not be targeting the hosts in
 # the cell_proxy_group group, and therefore they would not have role
 # defaults defined. If we put this variable in group_vars, then it cannot
 # be overridden by the inventory.

@@ -20,7 +20,7 @@

 - import_tasks: proxy_loadbalancer.yml
 vars:
-# Default is necessary because this play may not be targetting the hosts in
+# Default is necessary because this play may not be targeting the hosts in
 # the nova-novncproxy group, and therefore they would not have role
 # defaults defined. If we put these variables in group_vars, then they
 # cannot be overridden by the inventory.

@@ -57,7 +57,7 @@

 - import_tasks: proxy_loadbalancer.yml
 vars:
-# Default is necessary because this play may not be targetting the hosts in
+# Default is necessary because this play may not be targeting the hosts in
 # the nova-spicehtml5proxy group, and therefore they would not have role
 # defaults defined. If we put these variables in group_vars, then they
 # cannot be overridden by the inventory.

@@ -94,7 +94,7 @@

 - import_tasks: proxy_loadbalancer.yml
 vars:
-# Default is necessary because this play may not be targetting the hosts in
+# Default is necessary because this play may not be targeting the hosts in
 # the nova-serialproxy group, and therefore they would not have role
 # defaults defined. If we put these variables in group_vars, then they
 # cannot be overridden by the inventory.
@@ -7,7 +7,7 @@
 # Speaking to the nova team, this seems to be an issue in oslo.service,
 # with a fix proposed here: https://review.openstack.org/#/c/641907.
 # This issue also seems to affect the proxy services, which exit non-zero in
-# reponse to a SIGHUP, so restart those too.
+# response to a SIGHUP, so restart those too.
 # The issue actually affects all nova services, since they remain with RPC
 # version pinned to the previous release:
 # https://bugs.launchpad.net/kolla-ansible/+bug/1833069.

@@ -45,7 +45,7 @@
 changed_when: opensearch_retention_policy_create.status == 201
 when: opensearch_retention_policy_check.status == 404

-- name: Apply retention policy to existing indicies
+- name: Apply retention policy to existing indices
 become: true
 vars:
 opensearch_set_policy_body: {"policy_id": "retention"}
@@ -48,7 +48,7 @@ ovs_hugepage_mountpoint: /dev/hugepages

 # ovs <2.7 required dpdk phyical port names to be index
 # in pci address order as dpdkX where X is the index
-# ovs>=2.7 allows arbitray names but the pci address
+# ovs>=2.7 allows arbitrary names but the pci address
 # must be set in a new dpdkdev-opt field
 # valid values are indexed or named.
 ovs_physical_port_policy: named

@@ -386,8 +386,8 @@ function usage {
 ovs-dpdkctl.sh: A tool to configure ovs with dpdk.

 - This tool automate the process of binding host insterfacesto a dpdk
-compaible driver (uio_pci_generic | vfio-pci) at boot.
-- This tool automate bootstraping ovs so that it can use the
+compatible driver (uio_pci_generic | vfio-pci) at boot.
+- This tool automate bootstrapping ovs so that it can use the
 dpdk accelerated netdev datapath.

 commands:

@@ -403,14 +403,14 @@ commands:
 - removes ovs-dpdkctl configuration file.
 - bind_nics:
 - iterates over all dpdk interfaces defined in ovs-dpdkctl config
-and binds the interface to the target driver specifed in the config
+and binds the interface to the target driver specified in the config
 if current driver does not equal target.
 - unbind_nics:
 - iterates over all dpdk interfaces defined in ovs-dpdkctl config
 and restores the interface to its original non dpdk driver.
 - init:
-- defines dpdk specific configuration paramater in the ovsdb.
-- creates bridges as spcified by ovs bridge_mappings in
+- defines dpdk specific configuration parameter in the ovsdb.
+- creates bridges as specified by ovs bridge_mappings in
 ovs-dpdkctl config.
 - creates dpdk ports as defined by ovs port_mappings in
 ovs-dpdkctl config.

@@ -418,10 +418,10 @@ commands:
 - prints this message

 options:
-- debuging:
-- To enable debuging export OVS_DPDK_CTL_DEBUG=True
+- debugging:
+- To enable debugging export OVS_DPDK_CTL_DEBUG=True
 - install:
-- The varibles described below can be defined to customise
+- The variables described below can be defined to customise
 installation of ovs-dpdkctl.
 <variable>=<value> ovs-dpdkctl.sh install
 - bridge_mappings:

@@ -462,7 +462,7 @@ options:
 - Example: ovs_mem_channels=2
 - Default: "4"
 - ovs_socket_mem:
-- A comma separated list of hugepage memory, specifed in MBs per numa node,
+- A comma separated list of hugepage memory, specified in MBs per numa node,
 allocated to the ovs-vswitchd to use for the dpdk dataplane.
 - For best performance memory should be allocated evenly across all numa node
 that will run a pmd.
@@ -45,7 +45,7 @@
 hugepage_mountpoint: "{{ ovs_hugepage_mountpoint }}"
 ovs_physical_port_policy: "{{ ovs_physical_port_policy }}"

-- name: Binds the interface to the target driver specifed in the config
+- name: Binds the interface to the target driver specified in the config
 become: True
 command: "{{ node_config_directory }}/ovsdpdk-db/ovs-dpdkctl.sh bind_nics"
 environment:

@@ -215,7 +215,7 @@
 om_enable_rabbitmq_high_availability is True but no mirroring policy has been found.
 Currently the procedure to migrate from transient non-mirrored queues to durable mirrored queues is manual.
 Please follow the process described here: https://docs.openstack.org/kolla-ansible/latest/reference/message-queues/rabbitmq.html#high-availability.
-Note that this process may take several hours on larger systems, and may cause a degredation in performance at large scale.
+Note that this process may take several hours on larger systems, and may cause a degradation in performance at large scale.
 If you do not wish to enable this feature, set om_enable_rabbitmq_high_availability to False.

 run_once: true

@@ -239,7 +239,7 @@
 om_enable_rabbitmq_quorum_queues is True but {{ item.name }} is a non-quorum queue.
 Currently the procedure to migrate to quorum queues is manual.
 Please follow the process described here: https://docs.openstack.org/kolla-ansible/latest/reference/message-queues/rabbitmq.html#high-availability.
-Note that this process may take several hours on larger systems, and may cause a degredation in performance at large scale.
+Note that this process may take several hours on larger systems, and may cause a degradation in performance at large scale.
 If you do not wish to enable this feature, set om_enable_rabbitmq_quorum_queues to False.
 loop: "{{ (rabbitmq_queues.stdout | from_json) if rabbitmq_queues is not skipped else [] }}"
 loop_control:
@@ -6,7 +6,7 @@ HAProxy Guide

 Kolla Ansible supports a Highly Available (HA) deployment of
 Openstack and other services. High-availability in Kolla
-is implented as via Keepalived and HAProxy. Keepalived manages virtual IP
+is implemented as via Keepalived and HAProxy. Keepalived manages virtual IP
 addresses, while HAProxy load-balances traffic to service backends.
 These two components must be installed on the same hosts
 and they are deployed to hosts in the ``loadbalancer`` group.

@@ -71,7 +71,7 @@ Backend weights

 When different baremetal are used in infrastructure as haproxy backends
 or they are overloaded for some reason, kolla-ansible is able to change
-weight of backend per sevice. Weight can be any integer value from 1 to
+weight of backend per service. Weight can be any integer value from 1 to
 256.

 To set weight of backend per service, modify inventory file as below:

@@ -39,8 +39,8 @@ Applying log retention policies

 To stop your disks filling up, the Index State Management plugin for
 OpenSearch can be used to define log retention policies. A default
-retention policy is applied to all indicies which match the
-``opensearch_log_index_prefix``. This policy first closes old indicies,
+retention policy is applied to all indices which match the
+``opensearch_log_index_prefix``. This policy first closes old indices,
 and then eventually deletes them. It can be customised via the following
 variables:

@@ -40,9 +40,9 @@ Basic Auth
 Prometheus is protected with basic HTTP authentication. Kolla-ansible will
 create the following users: ``admin``, ``grafana`` (if grafana is
 enabled) and ``skyline`` (if skyline is enabled). The grafana username can
-be overidden using the variable
+be overridden using the variable
 ``prometheus_grafana_user``, the skyline username can
-be overidden using the variable ``prometheus_skyline_user``.
+be overridden using the variable ``prometheus_skyline_user``.
 The passwords are defined by the
 ``prometheus_password``, ``prometheus_grafana_password`` and
 ``prometheus_skyline_password`` variables in

@@ -56,7 +56,7 @@ The passwords are defined by the
 password: hello
 enabled: true

-or completely overriden with the ``prometheus_basic_auth_users`` variable.
+or completely overridden with the ``prometheus_basic_auth_users`` variable.

 Extending the default command line options
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -124,7 +124,7 @@ Extra files

 Sometimes it is necessary to reference additional files from within
 ``prometheus.yml``, for example, when defining file service discovery
-configuration. To enable you to do this, kolla-ansible will resursively
+configuration. To enable you to do this, kolla-ansible will recursively
 discover any files in ``{{ node_custom_config }}/prometheus/extras`` and
 template them. The templated output is then copied to
 ``/etc/prometheus/extras`` within the container on startup. For example to
@@ -74,7 +74,7 @@ Infoblox Backend
 .. important::

 When using Infoblox as the Designate backend the MDNS node
-requires the container to listen on port 53. As this is a privilaged
+requires the container to listen on port 53. As this is a privileged
 port you will need to build your designate-mdns container to run
 as the user root rather than designate.

@@ -225,7 +225,7 @@ Mellanox Infiniband (ml2/mlnx)
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 In order to add ``mlnx_infiniband`` to the list of mechanism driver
-for ``neutron`` to support Infiniband virtual funtions, you need to
+for ``neutron`` to support Infiniband virtual functions, you need to
 set the following (assuming neutron SR-IOV agent is also enabled using
 ``enable_neutron_sriov`` flag):

@@ -32,8 +32,8 @@ a buffer key - three in total. If the rotation interval is set lower than the
 sum of the token expiry and token allow expired window, more active keys will
 be configured in Keystone as necessary.

-Further infomation on Fernet tokens is available in the :keystone-doc:`Keystone
-documentation <admin/fernet-token-faq.html>`.
+Further information on Fernet tokens is available in the
+:keystone-doc:`Keystone documentation <admin/fernet-token-faq.html>`.

 Federated identity
 ------------------
@@ -41,7 +41,7 @@ This absolutely solves the problem of persistent data, but it introduces
 another security issue, permissions. With this host bind mount solution
 the data in ``var/lib/mysql`` will be owned by the mysql user in the
 container. Unfortunately, that mysql user in the container could have
-any UID/GID and thats who will own the data outside the container
+any UID/GID and that's who will own the data outside the container
 introducing a potential security risk. Additionally, this method
 dirties the host and requires host permissions to the directories
 to bind mount.

@@ -98,8 +98,8 @@ The following variables should be configured in Kolla Ansible's
 * Bool - set to true or false


-Prerequsites
-============
+Prerequisites
+=============
 Firewalld needs to be installed beforehand.

 Kayobe can be used to automate the installation and configuration of firewalld

@@ -32,7 +32,7 @@ workaround_ansible_issue_8743: yes
 # scenarios with all facts cached (as there is no task to fail).
 #kolla_ansible_setup_any_errors_fatal: false

-# This variable may be used to set the maxiumum failure percentage for all
+# This variable may be used to set the maximum failure percentage for all
 # plays. More fine-grained control is possible via per-service variables, e.g.
 # nova_max_fail_percentage. The default behaviour is to set a max fail
 # percentage of 100, which is equivalent to not setting it.
@@ -1,4 +1,4 @@
 ---
 features:
-- Add graceful timeout argument to kolla_docker library for stoping,
+- Add graceful timeout argument to kolla_docker library for stopping,
 restaring container.

@@ -2,7 +2,7 @@
 features:
 - Add Lets Encrypt TLS certificate service integration into Openstack
 deployment. Enables trusted TLS certificate generation option for
-secure communcation with OpenStack HAProxy instances using
+secure communication with OpenStack HAProxy instances using
 ``letsencrypt_email``, ``kolla_internal_fqdn`` and/or
 ``kolla_external_fqdn`` is required. One container runs an Apache
 ACME client webserver and one runs Lego for certificate retrieval

@@ -2,6 +2,6 @@
 features:
 - |
 Add support for deploying the Monasca Notification service. The
-Notification service is responsible for notifiying users when
+Notification service is responsible for notifying users when
 an alert, as defined via the Monasca API, is generated by the
 Monasca Thresh topology.

@@ -1,5 +1,5 @@
 ---
 features:
 - |
-Adds possibility for inlcuding custom alert notification templates with
+Adds possibility for including custom alert notification templates with
 Prometheus Alertmanager.

@@ -2,5 +2,5 @@
 features:
 - |
 Add "enable_trove_singletenant" option to enable the Trove single
-tenant functionnality. This feature will allow Trove to create
+tenant functionality. This feature will allow Trove to create
 Nova instances in a different tenant than the user tenant.
@@ -2,6 +2,6 @@
 fixes:
 - |
 Fixes the copy job for grafana custom home dashboard file.
-The copy job for the grafana home dashboard file needs to run priviliged,
+The copy job for the grafana home dashboard file needs to run privileged,
 otherwise permission denied error occurs.
 `LP#1947710 <https://launchpad.net/bugs/1947710>`__

@@ -1,7 +1,7 @@
 ---
 fixes:
 - |
-The flags ``--db-nb-pid`` and ``--db-sb-pid`` have been corected to be
+The flags ``--db-nb-pid`` and ``--db-sb-pid`` have been corrected to be
 ``--db-nb-pidfile`` and ``--db-sb-pidfile`` respectively. See here for
 reference:
 https://github.com/ovn-org/ovn/blob/6c6a7ad1c64a21923dc9b5bea7069fd88bcdd6a8/utilities/ovn-ctl#L1045

@@ -11,7 +11,7 @@ deprecations:
 is not possible any more.
 features:
 - |
-Sanity checks have been removed. These "smoke tests" orignially
+Sanity checks have been removed. These "smoke tests" originally
 were implemented for barbican, cinder, glance and keystone.
 upgrade:
 - |

@@ -6,5 +6,5 @@ upgrade:
 Instructions to migrate existing data to the new, disk based format
 can be found at
 https://docs.influxdata.com/influxdb/v1.7/administration/upgrading/
-If you do not follow the migration proceedure, InfluxDB should continue
+If you do not follow the migration procedure, InfluxDB should continue
 to work, but this is not recommended.
|
@ -3,5 +3,5 @@ features:
|
|||||||
- |
|
- |
|
||||||
Adds configuration options to enable backend TLS encryption from HAProxy
|
Adds configuration options to enable backend TLS encryption from HAProxy
|
||||||
to the Nova, Ironic, and Neutron services. When used in conjunction with
|
to the Nova, Ironic, and Neutron services. When used in conjunction with
|
||||||
enabling TLS for service API endpoints, network communcation will be
|
enabling TLS for service API endpoints, network communication will be
|
||||||
encrypted end to end, from client through HAProxy to the backend service.
|
encrypted end to end, from client through HAProxy to the backend service.
|
||||||
|
@ -4,5 +4,5 @@ features:
|
|||||||
Adds configuration options to enable backend TLS encryption from HAProxy
|
Adds configuration options to enable backend TLS encryption from HAProxy
|
||||||
to the Keystone, Glance, Heat, Placement, Horizon, Barbican, and Cinder
|
to the Keystone, Glance, Heat, Placement, Horizon, Barbican, and Cinder
|
||||||
services. When used in conjunction with enabling TLS for service API
|
services. When used in conjunction with enabling TLS for service API
|
||||||
endpoints, network communcation will be encrypted end to end, from client
|
endpoints, network communication will be encrypted end to end, from client
|
||||||
through HAProxy to the backend service.
|
through HAProxy to the backend service.
|
||||||
|
@ -3,6 +3,6 @@ fixes:
|
|||||||
- |
|
- |
|
||||||
Set the etcd internal hostname and cacert for tls internal enabled
|
Set the etcd internal hostname and cacert for tls internal enabled
|
||||||
deployments. This allows services to work with etcd when
|
deployments. This allows services to work with etcd when
|
||||||
coordination is enabled for TLS interal deployments. Without this
|
coordination is enabled for TLS internal deployments. Without this
|
||||||
fix, the coordination backend fails to connect to etcd and the
|
fix, the coordination backend fails to connect to etcd and the
|
||||||
service itself crashes.
|
service itself crashes.
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
fixes:
|
fixes:
|
||||||
- |
|
- |
|
||||||
Fix the configuration of the etcd service so that its protocol is
|
Fix the configuration of the etcd service so that its protocol is
|
||||||
independant of the value of the ``internal_protocol`` parameter. The etcd
|
independent of the value of the ``internal_protocol`` parameter. The etcd
|
||||||
service is not load balanced by HAProxy, so there is no proxy
|
service is not load balanced by HAProxy, so there is no proxy
|
||||||
layer to do TLS termination when ``internal_protocol`` is configured to be
|
layer to do TLS termination when ``internal_protocol`` is configured to be
|
||||||
``https``.
|
``https``.
|
||||||
|
@@ -1,5 +1,5 @@
 ---
 fixes:
 - |
-Fixes Zun capsules loosing network namespaces after
+Fixes Zun capsules losing network namespaces after
 restarting zun_cni_daemon container

@@ -1,7 +1,7 @@
 ---
 upgrade:
 - |
-Horizon role was reworked to preffered local_settings.d
+Horizon role was reworked to preferred local_settings.d
 configuration model. Files ``local_settings`` and
 ``custom_local_settings`` were renamed to ``_9998-kolla-settings.py``
 and ``_9999-custom-settings.py`` Users who use horizon's custom

@@ -2,7 +2,7 @@
 upgrade:
 - |
 The default connection limit for HAProxy backends is 2000 however, MariaDB
-defaults to a max of 10000 conections. This has been changed to match the
+defaults to a max of 10000 connections. This has been changed to match the
 MariaDB limit.

 'haproxy_max_connections' has also been increased to 40000 to accommodate
@@ -9,10 +9,10 @@ prelude: >
 ubuntu 16.04 and centos 7.2 as target servers.
 See features section for more details.
 features:
-- The kolla-host playbook supports bootrapping
+- The kolla-host playbook supports bootstrapping
 clean os installations to enable them to be
 used as kolla hosts. When the playbook completes
-the bootstraped systems should pass the kolla
+the bootstrapped systems should pass the kolla
 prechecks.
 - The kolla-host playbook will install docker 1.11
 and docker-py on all baremetal nodes.

@@ -15,5 +15,5 @@ upgrade:
 The Octavia amphora provider by default is now deployed with the jobboard
 feature enabled. This requires the Redis service to be enabled as a
 dependency, please update your configuration accordingly if needed.
-For futher information see
+For further information see
 `Amphorav2 docs <https://docs.openstack.org/octavia/latest/install/install-amphorav2.html>`_

@@ -8,7 +8,7 @@ fixes:
 With the previous behavior each openstack exporter
 was scraped at the same time.
 This caused each exporter to query the openstack APIs
-simultaneously introducing unneccesary load and duplicate
+simultaneously introducing unnecessary load and duplicate
 time series in the prometheus database due to the instance
 label being unique for each exporter.
 `LP#1972818 <https://bugs.launchpad.net/kolla-ansible/+bug/1972818>`__
@@ -6,5 +6,5 @@ features:
 databases.
 Setting ``ovn-monitor-all`` variable to 'true' will remove some CPU
 load from OVN SouthBound DB but will effect with more updates
-comming to ovn-controller. Might be helpfull in large deployments
+coming to ovn-controller. Might be helpful in large deployments
 with many compute hosts.

@@ -28,7 +28,7 @@ issues:
 issue has been addressed.
 upgrade:
 - |
-When upgrading ovs-dpdk it should be noted that this will alway invovle a
+When upgrading ovs-dpdk it should be noted that this will always involve a
 dataplane outage. Unlike kernel OVS the dataplane for ovs-dpdk executes in
 the ovs-vswitchd process. As such it is recommended to always evacuate
 all vm workloads from a node running ovs-dpdk prior to upgrading.

@@ -2,4 +2,4 @@
 features:
 - |
 Adds the ability to configure rabbitmq via ``rabbitmq_extra_config``
-which can be overriden in globals.yml.
+which can be overridden in globals.yml.

@@ -4,7 +4,7 @@ upgrade:
 The RabbitMQ variable `rabbitmq-ha-promote-on-shutdown` now defaults to
 `"always"`. This only has an effect if
 `om_enable_rabbitmq_high_availability` is set to `True`. When
-`ha-promote-on-shutdown` is set to `always`, queue mirrors are promted on
+`ha-promote-on-shutdown` is set to `always`, queue mirrors are promoted on
 shutdown even if they aren't fully synced. This means that value
 availability over the risk of losing some messages. Note that the contents
 of the RabbitMQ definitions.json are now changed, meaning RabbitMQ
@@ -3,7 +3,7 @@ issues:
 - |
 As of Ceph Luminous 12.2.1 the maximum number of PGs per OSD before the
 monitor issues a warning has been reduced from 300 to 200 PGs. In addition,
-Ceph now fails with an error rather than a warning in the case of exeeding
+Ceph now fails with an error rather than a warning in the case of exceeding
 the max value.
 In order to allow Kolla to continue to be used out of the box we have
 reduced the default values for pg_num and pgp_num from 128 to 8. This will

@@ -1,11 +1,11 @@
 ---
 features:
 - |
-Adds support to seperate Swift access and replication traffic from other
+Adds support to separate Swift access and replication traffic from other
 storage traffic.

 In a deployment where both Ceph and Swift have been deployed,
-this changes adds functionalality to support optional seperation
+this changes adds functionalality to support optional separation
 of storage network traffic. This adds two new network interfaces
 ``swift_storage_interface`` and ``swift_replication_interface`` which
 maintain backwards compatibility.

@@ -5,7 +5,7 @@ features:
 upgrade:
 - |
 To support new tacker-conductor service, tacker role has been
-reformated, before upgrade, tacker-server and tacker-conductor
+reformatted, before upgrade, tacker-server and tacker-conductor
 groups should be included in inventory files.
 - Tacker requires Mistral service to be enabled as of Pike release
 to implement vim monitoring.
@@ -113,7 +113,7 @@ Work Items
 disables certificate verification.
 - Ensure that all tasks that interact with OpenStack APIs support disabling
 certificate verification.
-- Fix heat-api bootstrap process, which currently requires valid certficate,
+- Fix heat-api bootstrap process, which currently requires valid certificate,
 probably by moving domain/user creation out of the container, and into the
 ansible itself.
 - Allow for providing a CA used to verify connections to the service backends.

@@ -199,7 +199,7 @@ At the broadest level, OpenStack can split up into two main roles, Controller
 and Compute. With Kubernetes, the role definition layer changes.
 Kolla-kubernetes will still need to define Compute nodes, but not Controller
 nodes. Compute nodes hold the libvirt container and the running vms. That
-service cannont migrate because the vms associated with it exist on the node.
+service cannot migrate because the vms associated with it exist on the node.
 However, the Controller role is more flexible. The Kubernetes layer provides IP
 persistence so that APIs will remain active and abstracted from the operator's
 view [15]. kolla-kubernetes can direct Controller services away from the Compute

@@ -245,7 +245,7 @@ kibana as the default logging mechanism.

 The community will implement centralized logging by using a 'side car' container
 in the Kubernetes pod [17]. The logging service will trace the logs from the
-shared volume of the running serivce and send the data to elastic search. This
+shared volume of the running service and send the data to elastic search. This
 solution is ideal because volumes are shared among the containers in a pod.

 Implementation
@@ -167,7 +167,7 @@ Handling HAProxy and Keepalived

 As already mentioned HAProxy and Keepalived do not support logging to files.
 This means that some other mechanism should be used for these two services (and
-any other services that only suppport logging to Syslog).
+any other services that only support logging to Syslog).

 Our prototype has demonstrated that we can make Heka act as a Syslog server.
 This works by using Heka's ``UdpInput`` plugin with its ``net`` option set

@@ -36,7 +36,7 @@ more granular control, without the need to add the ``-e @/path/to/file`` flag.

 Use cases
 ---------
-1. Allow a more granular controler over individual service's options
+1. Allow a more granular controller over individual service's options
 2. Better file and directory structure

 Proposed change

@@ -45,7 +45,7 @@ Include where in the kolla tree hierarchy this will reside.

 Security impact
 ---------------
-How does this feature impact the securtiy of the deployed OpenStack.
+How does this feature impact the security of the deployed OpenStack.

 Performance Impact
 ------------------
@@ -70,7 +70,7 @@ openstack_service_rpc_workers: "1"
 {% endif %}

 {% if need_build_image and not is_previous_release %}
-# NOTE(Jeffrey4l): use different a docker namespace name in case it pull image from hub.docker.io when deplying
+# NOTE(Jeffrey4l): use different a docker namespace name in case it pull image from hub.docker.io when deploying
 docker_namespace: "lokolla"
 # NOTE(yoctozepto): use hostname or FQDN to be compatible between IPv4 and IPv6
 # docker does not support referencing registry via an IPv6 address

@@ -246,7 +246,7 @@ function unset_cirros_image_q35_machine_type {

 function test_neutron_modules {
 # Exit the function if scenario is "ovn" or if there's an upgrade
-# as inly concerns ml2/ovs
+# as it only concerns ml2/ovs
 if [[ $SCENARIO == "ovn" ]] || [[ $HAS_UPGRADE == "yes" ]]; then
 return
 fi
@@ -44,27 +44,27 @@ if [[ "$enable_swift" == "yes" ]]; then
 fi

 if [[ "$glance_file_datadir_volume" != "glance" && -d "$glance_file_datadir_volume" ]]; then
-echo "Removing glance volume if it is customzied"
+echo "Removing glance volume if it is customized"
 rm -rfv $glance_file_datadir_volume
 fi

 if [[ "$nova_instance_datadir_volume" != "nova_compute" && -d "$nova_instance_datadir_volume" ]]; then
-echo "Removing nova_compute volume if it is customzied"
+echo "Removing nova_compute volume if it is customized"
 rm -rfv $nova_instance_datadir_volume
 fi

 if [[ "$gnocchi_metric_datadir_volume" != "gnocchi" && -d "$gnocchi_metric_datadir_volume" ]]; then
-echo "Removing gnocchi volume if it is customzied"
+echo "Removing gnocchi volume if it is customized"
 rm -rfv $gnocchi_metric_datadir_volume
 fi

 if [[ "$influxdb_datadir_volume" != "influxdb" && -d "$influxdb_datadir_volume" ]]; then
-echo "Removing influxdb volume if it is customzied"
+echo "Removing influxdb volume if it is customized"
 rm -rfv $influxdb_datadir_volume
 fi

 if [[ "$opensearch_datadir_volume" != "opensearch" && -d "$opensearch_datadir_volume" ]]; then
-echo "Removing opensearch volume if it is customzied"
+echo "Removing opensearch volume if it is customized"
 rm -rfv $opensearch_datadir_volume
 fi
tox.ini (2 lines changed)
@@ -100,6 +100,7 @@ deps =
 -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
 -r{toxinidir}/requirements.txt
 -r{toxinidir}/lint-requirements.txt
+codespell
 allowlist_externals = bash
 find
 commands =
@@ -115,6 +116,7 @@ commands =
 deps = {[testenv:linters]deps}
 commands =
 flake8 {posargs}
+codespell -I {toxinidir}/.codespell-ignore

 [flake8]
 show-source = True
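With the two tox.ini additions above, codespell is installed alongside the other lint dependencies and runs after flake8, using .codespell-ignore as its ignore-words list. A hedged sketch of the resulting developer workflow, assuming the repository's usual tox setup (the environment name comes from the existing tox.ini, not from this change):

# Run the combined style check, which now includes the codespell step:
tox -e pep8
# Or run only the spelling check directly:
codespell -I .codespell-ignore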