Remove kolla-ceph
Kolla-Ansible Ceph deployment mechanism has been deprecated in Train [1].

This change removes the Ansible code and associated CI jobs.

[1]: https://review.opendev.org/669214

Change-Id: Ie2167f02ad2f525d3b0f553e2c047516acf55bc2
parent 4200089716
commit 4e6fe7a6da
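With the in-tree kolla-ceph roles gone, storage is expected to come from an externally managed Ceph cluster. As an illustrative sketch only (not part of this commit -- the values and the assumption of an already-deployed external cluster are the editor's), the relevant globals.yml settings kept by this change look roughly like:

# globals.yml -- illustrative values, assumes an existing external Ceph cluster
glance_backend_ceph: "yes"
cinder_backend_ceph: "yes"
nova_backend_ceph: "yes"
gnocchi_backend_storage: "ceph"
external_ceph_cephx_enabled: "yes"
# Pool and keyring names keep their defaults from group_vars/all.yml,
# e.g. ceph_glance_pool_name: "images" and ceph_glance_keyring: "ceph.client.glance.keyring".

These per-service flags replace the old behaviour where the backends followed enable_ceph.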
@@ -87,7 +87,6 @@ Infrastructure components
 
 Kolla-Ansible deploys containers for the following infrastructure components:
 
-- `Ceph <https://ceph.com/>`__ implementation for Cinder, Glance and Nova.
 - `Collectd <https://collectd.org/>`__,
   `Telegraf <https://docs.influxdata.com/telegraf/>`__,
   `InfluxDB <https://influxdata.com/time-series-platform/influxdb/>`__,
@@ -394,7 +394,6 @@ prometheus_node_exporter_port: "9100"
 prometheus_mysqld_exporter_port: "9104"
 prometheus_haproxy_exporter_port: "9101"
 prometheus_memcached_exporter_port: "9150"
-prometheus_ceph_mgr_exporter_port: "9283"
 # Default cadvisor port of 8080 already in use
 prometheus_cadvisor_port: "18080"
 
@@ -549,11 +548,6 @@ enable_ceilometer: "no"
 enable_ceilometer_ipmi: "no"
 enable_cells: "no"
 enable_central_logging: "no"
-enable_ceph: "no"
-enable_ceph_mds: "no"
-enable_ceph_rgw: "no"
-enable_ceph_nfs: "no"
-enable_ceph_dashboard: "{{ enable_ceph | bool }}"
 enable_chrony: "yes"
 enable_cinder: "no"
 enable_cinder_backup: "yes"
@@ -812,15 +806,11 @@ openstack_auth:
 #######################
 # Glance options
 #######################
-# Using glance_backend_ceph rather than enable_ceph to determine whether to
-# use the file backend, as this allows for the external ceph case, where
-# enable_ceph is False.
 glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_swift | bool or glance_backend_vmware | bool) }}"
-glance_backend_ceph: "{{ enable_ceph }}"
+glance_backend_ceph: "no"
 glance_backend_vmware: "no"
 enable_glance_image_cache: "no"
-# ceph backend has priority over swift in all-ceph clusters
-glance_backend_swift: "{{ not (enable_ceph | bool) and enable_swift | bool }}"
+glance_backend_swift: "{{ enable_swift | bool }}"
 glance_file_datadir_volume: "glance"
 glance_enable_rolling_upgrade: "no"
 glance_api_hosts: "{{ [groups['glance-api']|first] if glance_backend_file | bool and glance_file_datadir_volume == 'glance' else groups['glance-api'] }}"
@@ -844,7 +834,7 @@ panko_database_type: "mysql"
 #################
 # Valid options are [ file, ceph, swift ]
 # Defaults to file if ceph and swift are enabled; explicitly set to either if required.
-gnocchi_backend_storage: "{% if enable_ceph | bool and not enable_swift | bool %}ceph{% elif enable_swift | bool and not enable_ceph | bool %}swift{% else %}file{% endif %}"
+gnocchi_backend_storage: "{% if enable_swift | bool %}swift{% else %}file{% endif %}"
 
 # Valid options are [redis, '']
 gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}"
@@ -854,7 +844,7 @@ gnocchi_metric_datadir_volume: "gnocchi"
 #################################
 # Cinder options
 #################################
-cinder_backend_ceph: "{{ enable_ceph }}"
+cinder_backend_ceph: "no"
 cinder_backend_vmwarevc_vmdk: "no"
 cinder_volume_group: "cinder-volumes"
 cinder_target_helper: "{{ 'lioadm' if ansible_os_family == 'RedHat' and ansible_distribution_major_version is version_compare('8', '>=') else 'tgtadm' }}"
@@ -913,7 +903,7 @@ neutron_legacy_iptables: "no"
 #######################
 # Nova options
 #######################
-nova_backend_ceph: "{{ enable_ceph }}"
+nova_backend_ceph: "no"
 nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}"
 # Valid options are [ kvm, qemu, vmware, xenapi ]
 nova_compute_virt_type: "kvm"
@@ -964,43 +954,12 @@ octavia_amp_flavor_id:
 qinling_kubernetes_certificates: "no"
 
 ###################
-# Ceph options
+# External Ceph options
 ###################
-# Ceph can be setup with a caching to improve performance. To use the cache you
-# must provide separate disks than those for the OSDs
-ceph_enable_cache: "no"
-
+# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes)
 external_ceph_cephx_enabled: "yes"
 
-# Ceph is not able to determine the size of a cache pool automatically,
-# so the configuration on the absolute size is required here, otherwise the flush/evict will not work.
-ceph_target_max_bytes: ""
-ceph_target_max_objects: ""
-
-# Valid options are [ forward, none, writeback ]
-ceph_cache_mode: "writeback"
-
-# Valid options are [ ext4, btrfs, xfs ]
-ceph_osd_filesystem: "xfs"
-
-# Set to 'yes-i-really-really-mean-it' to force wipe disks with existing partitions for OSDs. Only
-# set if you understand the consequences!
-ceph_osd_wipe_disk: ""
-
-# These are /etc/fstab options. Comma separated, no spaces (see fstab(8))
-ceph_osd_mount_options: "defaults,noatime"
-
-# A requirement for using the erasure-coded pools is you must setup a cache tier
-# Valid options are [ erasure, replicated ]
-ceph_pool_type: "replicated"
-
-# Integrate Ceph Rados Object Gateway with OpenStack keystone
-enable_ceph_rgw_keystone: "no"
-
-# Enable/disable ceph-rgw compatibility with OpenStack Swift
-# Valid options are [ True, False ]
-ceph_rgw_compatibility: "False"
-
+# External Ceph pool names
 ceph_cinder_pool_name: "volumes"
 ceph_cinder_backup_pool_name: "backups"
 ceph_glance_pool_name: "images"
@@ -1014,38 +973,13 @@ ceph_gnocchi_user: "gnocchi"
 ceph_manila_user: "manila"
 ceph_nova_user: "nova"
 
-ceph_erasure_profile: "k=4 m=2 ruleset-failure-domain=host"
-ceph_rule: "default host {{ 'indep' if ceph_pool_type == 'erasure' else 'firstn' }}"
-ceph_cache_rule: "cache host firstn"
-
-# Set the pgs and pgps for pool
-# WARNING! These values are dependant on the size and shape of your cluster -
-# the default values are not suitable for production use. Please refer to the
-# Kolla Ceph documentation for more information.
-ceph_pool_pg_num: 8
-ceph_pool_pgp_num: 8
-
-# Set the store type for ceph OSD
-# Valid options are [ filestore, bluestore]
-ceph_osd_store_type: "bluestore"
-
-# Set the host type for ceph daemons
-# Valid options are [ IP, HOSTNAME, FQDN, INVENTORY ]
-# Note: For existing clusters, please don't modify this parameter. Otherwise,
-# the existing mon will be invalidated, and the existing osd crush map will
-# be changed.
-ceph_mon_host_type: "IP"
-ceph_mgr_host_type: "INVENTORY"
-ceph_osd_host_type: "IP"
-ceph_mds_host_type: "INVENTORY"
-
 # External Ceph keyrings
 ceph_cinder_keyring: "ceph.client.cinder.keyring"
 ceph_cinder_backup_keyring: "ceph.client.cinder-backup.keyring"
 ceph_glance_keyring: "ceph.client.glance.keyring"
 ceph_gnocchi_keyring: "ceph.client.gnocchi.keyring"
 ceph_manila_keyring: "ceph.client.manila.keyring"
-ceph_nova_keyring: "{% if enable_ceph | bool %}ceph.client.nova.keyring{% else %}{{ ceph_cinder_keyring }}{% endif %}"
+ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
 
 #####################
 # VMware support
@@ -1088,7 +1022,7 @@ enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}"
 enable_prometheus_memcached_exporter: "{{ enable_memcached | bool }}"
 enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}"
 enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}"
-enable_prometheus_ceph_mgr_exporter: "{{ enable_ceph | bool and enable_prometheus | bool }}"
+enable_prometheus_ceph_mgr_exporter: "no"
 enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
 enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}"
 enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
@@ -1097,6 +1031,7 @@ prometheus_alertmanager_user: "admin"
 prometheus_openstack_exporter_interval: "60s"
 prometheus_elasticsearch_exporter_interval: "60s"
 prometheus_cmdline_extras:
+prometheus_ceph_mgr_exporter_endpoints: []
 
 ############
 # Vitrage
@@ -145,9 +145,6 @@ control
 [murano:children]
 control
 
-[ceph:children]
-control
-
 [ironic:children]
 control
 
@@ -314,25 +311,6 @@ neutron
 [ironic-neutron-agent:children]
 neutron
 
-# Ceph
-[ceph-mds:children]
-ceph
-
-[ceph-mgr:children]
-ceph
-
-[ceph-nfs:children]
-ceph
-
-[ceph-mon:children]
-ceph
-
-[ceph-rgw:children]
-ceph
-
-[ceph-osd:children]
-storage
-
 # Cinder
 [cinder-api:children]
 cinder
@@ -176,9 +176,6 @@ control
 [ironic:children]
 control
 
-[ceph:children]
-control
-
 [magnum:children]
 control
 
@@ -333,25 +330,6 @@ neutron
 [ironic-neutron-agent:children]
 neutron
 
-# Ceph
-[ceph-mds:children]
-ceph
-
-[ceph-mgr:children]
-ceph
-
-[ceph-nfs:children]
-ceph
-
-[ceph-mon:children]
-ceph
-
-[ceph-rgw:children]
-ceph
-
-[ceph-osd:children]
-storage
-
 # Cinder
 [cinder-api:children]
 cinder
@@ -1,160 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2018 99cloud
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import re
-import subprocess  # nosec
-
-
-DOCUMENTATION = '''
----
-module: kolla_ceph_keyring
-short_description: >
-  Module for update ceph client keyring caps in kolla.
-description:
-  - A module used to update ceph client keyring caps in kolla.
-options:
-  name:
-    description:
-      - the client name in ceph
-    required: True
-    type: str
-  container_name:
-    description:
-      - the ceph mon container name
-    required: True
-    default: ceph_mon
-    type: str
-  caps:
-    description:
-      - the ceph auth caps
-    required: True
-    type: dict
-author: Jeffrey Zhang
-'''
-
-EXAMPLES = '''
-- name: configure admin client caps
-  kolla_ceph_keyring:
-    name: client.admin
-    container_name: ceph_mon
-    caps:
-      mds: 'allow *'
-      mon: 'allow *'
-      osd: 'allow *'
-      mgr: 'allow *'
-'''
-
-
-enoent_re = re.compile(r"\bENOENT\b")
-
-
-class CephKeyring(object):
-    def __init__(self, name, caps, container_name='ceph_mon'):
-        self.name = name
-        self.caps = caps
-        self.container_name = container_name
-        self.changed = False
-        self.message = None
-
-    def _run(self, cmd):
-        _prefix = ['docker', 'exec', self.container_name]
-        cmd = _prefix + cmd
-        proc = subprocess.Popen(cmd,  # nosec
-                                stdout=subprocess.PIPE,
-                                stderr=subprocess.PIPE)
-        stdout, stderr = proc.communicate()
-        retcode = proc.poll()
-        if retcode != 0:
-            output = 'stdout: "%s", stderr: "%s"' % (stdout, stderr)
-            raise subprocess.CalledProcessError(retcode, cmd, output)
-        return stdout
-
-    def _format_caps(self):
-        caps = []
-        for obj in sorted(self.caps):
-            caps.extend([obj, self.caps[obj]])
-        return caps
-
-    def parse_stdout(self, stdout):
-        keyring = json.loads(stdout)
-        # there should be only one element
-        return keyring[0]
-
-    def ensure_keyring(self):
-        try:
-            stdout = self.get_keyring()
-        except subprocess.CalledProcessError as e:
-            if e.returncode != 2 or not enoent_re.search(e.output):
-                # this is not a missing keyring case
-                raise
-            # keyring doesn't exsit, try to create it
-            stdout = self.create_keyring()
-            self.changed = True
-            self.message = 'ceph keyring for %s is created' % self.name
-        keyring = self.parse_stdout(stdout)
-        if keyring['caps'] != self.caps:
-            self.update_caps()
-            stdout = self.get_keyring()
-            keyring = self.parse_stdout(stdout)
-            self.changed = True
-            self.message = 'ceph keyring for %s is updated' % self.name
-        self.keyring = keyring
-        return self.keyring
-
-    def get_keyring(self):
-        ceph_cmd = ['ceph', '--format', 'json', 'auth', 'get', self.name]
-        return self._run(ceph_cmd)
-
-    def update_caps(self):
-        ceph_cmd = ['ceph', '--format', 'json', 'auth', 'caps', self.name]
-        caps = self._format_caps()
-        ceph_cmd.extend(caps)
-        self._run(ceph_cmd)
-
-    def create_keyring(self):
-        ceph_cmd = ['ceph', '--format', 'json', 'auth',
-                    'get-or-create', self.name]
-        caps = self._format_caps()
-        ceph_cmd.extend(caps)
-        return self._run(ceph_cmd)
-
-
-def main():
-    specs = dict(
-        name=dict(type='str', required=True),
-        container_name=dict(type='str', default='ceph_mon'),
-        caps=dict(type='dict', required=True)
-    )
-    module = AnsibleModule(argument_spec=specs)  # noqa
-    params = module.params
-    ceph_keyring = CephKeyring(params['name'],
-                               params['caps'],
-                               params['container_name'])
-    try:
-        keyring = ceph_keyring.ensure_keyring()
-        module.exit_json(changed=ceph_keyring.changed,
-                         keyring=keyring,
-                         message=ceph_keyring.message)
-    except subprocess.CalledProcessError as ex:
-        msg = ('Failed to call command: %s returncode: %s output: %s' %
-               (ex.cmd, ex.returncode, ex.output))
-        module.fail_json(msg=msg)
-
-
-from ansible.module_utils.basic import *  # noqa
-if __name__ == "__main__":
-    main()
@@ -53,9 +53,6 @@ easy_install_available: >-
 host_python_version: "{{ ansible_python.version.major }}.{{ ansible_python.version.minor }}"
 host_python_major_version: "{{ ansible_python.version.major }}"
 
-# Ubuntu 18+ bundles nfs-ganesha 2.6.0 with Ceph Mimic packages,
-# which does udp rpcbind test even with NFSv3 disabled - therefore
-# rpcbind needs to be installed, when Ceph NFS is enabled.
 debian_pkg_install:
   - "{{ docker_apt_package }}"
   - git
@@ -63,7 +60,6 @@ debian_pkg_install:
   - "{% if not easy_install_available %}python{% if host_python_major_version == '3' %}3{% endif %}-pip{% endif %}"
   - "{% if virtualenv is not none %}python{% if host_python_major_version == '3' %}3{% endif %}-virtualenv{% endif %}"
   - "{% if enable_host_ntp | bool %}ntp{% endif %}"
-  - "{% if enable_ceph_nfs|bool %}rpcbind{% endif %}"
 
 redhat_pkg_install:
   - "{{ docker_yum_package }}"
@@ -1,149 +0,0 @@
----
-project_name: "ceph"
-
-ceph_services:
-  ceph-rgw:
-    group: ceph-rgw
-    enabled: "{{ enable_ceph_rgw|bool }}"
-    haproxy:
-      radosgw:
-        enabled: "{{ enable_ceph|bool and enable_ceph_rgw|bool }}"
-        mode: "http"
-        external: false
-        port: "{{ rgw_port }}"
-      radosgw_external:
-        enabled: "{{ enable_ceph|bool and enable_ceph_rgw|bool }}"
-        mode: "http"
-        external: true
-        port: "{{ rgw_port }}"
-
-
-####################
-# Docker
-####################
-ceph_install_type: "{{ kolla_install_type }}"
-ceph_tag: "{{ openstack_tag }}"
-
-ceph_mds_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ ceph_install_type }}-ceph-mds"
-ceph_mds_tag: "{{ ceph_tag }}"
-ceph_mds_image_full: "{{ ceph_mds_image }}:{{ ceph_mds_tag }}"
-
-ceph_mon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ ceph_install_type }}-ceph-mon"
-ceph_mon_tag: "{{ ceph_tag }}"
-ceph_mon_image_full: "{{ ceph_mon_image }}:{{ ceph_mon_tag }}"
-
-ceph_mgr_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ ceph_install_type }}-ceph-mgr"
-ceph_mgr_tag: "{{ ceph_tag }}"
-ceph_mgr_image_full: "{{ ceph_mgr_image }}:{{ ceph_mgr_tag }}"
-
-ceph_nfs_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ ceph_install_type }}-ceph-nfs"
-ceph_nfs_tag: "{{ ceph_tag }}"
-ceph_nfs_image_full: "{{ ceph_nfs_image }}:{{ ceph_nfs_tag }}"
-
-ceph_osd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ ceph_install_type }}-ceph-osd"
-ceph_osd_tag: "{{ ceph_tag }}"
-ceph_osd_image_full: "{{ ceph_osd_image }}:{{ ceph_osd_tag }}"
-
-ceph_rgw_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ ceph_install_type }}-ceph-rgw"
-ceph_rgw_tag: "{{ ceph_tag }}"
-ceph_rgw_image_full: "{{ ceph_rgw_image }}:{{ ceph_rgw_tag }}"
-
-####################
-# Ceph
-####################
-osd_initial_weight: "1"
-ceph_debug: "{{ openstack_logging_debug }}"
-
-# Increase tcmalloc cache size
-ceph_tcmalloc_tc_bytes: "134217728"
-
-ceph_client_admin_keyring_caps:
-  mds: "allow *"
-  mon: "allow *"
-  osd: "allow *"
-  mgr: "allow *"
-
-ceph_client_mgr_keyring_caps:
-  mon: 'allow profile mgr'
-  osd: 'allow *'
-  mds: 'allow *'
-
-ceph_client_mds_keyring_caps:
-  mds: 'allow *'
-  osd: 'allow *'
-  mon: 'allow rwx'
-
-partition_name_osd_bootstrap: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_BS' if ceph_osd_store_type == 'bluestore' else 'KOLLA_CEPH_OSD_BOOTSTRAP' }}"
-partition_name_cache_bootstrap: "{{ 'KOLLA_CEPH_OSD_CACHE_BOOTSTRAP_BS' if ceph_osd_store_type == 'bluestore' else 'KOLLA_CEPH_OSD_CACHE_BOOTSTRAP' }}"
-partition_name_osd_data: "{{ 'KOLLA_CEPH_DATA_BS' if ceph_osd_store_type == 'bluestore' else 'KOLLA_CEPH_DATA' }}"
-
-ceph_mon_hostname: "{%- if ceph_mon_host_type == 'HOSTNAME' -%}{{ ansible_hostname }}
-                    {%- elif ceph_mon_host_type == 'FQDN' -%}{{ ansible_fqdn }}
-                    {%- elif ceph_mon_host_type == 'INVENTORY' -%}{{ inventory_hostname }}
-                    {%- else -%}{{ storage_interface_address }}
-                    {%- endif %}"
-ceph_mgr_hostname: "{%- if ceph_mgr_host_type == 'HOSTNAME' -%}{{ ansible_hostname }}
-                    {%- elif ceph_mgr_host_type == 'FQDN' -%}{{ ansible_fqdn }}
-                    {%- elif ceph_mgr_host_type == 'INVENTORY' -%}{{ inventory_hostname }}
-                    {%- else -%}{{ storage_interface_address }}
-                    {%- endif %}"
-ceph_osd_hostname: "{%- if ceph_osd_host_type == 'HOSTNAME' -%}{{ ansible_hostname }}
-                    {%- elif ceph_osd_host_type == 'FQDN' -%}{{ ansible_fqdn }}
-                    {%- elif ceph_osd_host_type == 'INVENTORY' -%}{{ inventory_hostname }}
-                    {%- else -%}{{ storage_interface_address }}
-                    {%- endif %}"
-ceph_mds_hostname: "{%- if ceph_mds_host_type == 'HOSTNAME' -%}{{ ansible_hostname }}
-                    {%- elif ceph_mds_host_type == 'FQDN' -%}{{ ansible_fqdn }}
-                    {%- elif ceph_mds_host_type == 'INVENTORY' -%}{{ inventory_hostname }}
-                    {%- else -%}{{ storage_interface_address }}
-                    {%- endif %}"
-
-####################
-## Ceph_rgw_keystone
-####################
-swift_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ rgw_port }}{{ '/' if ceph_rgw_compatibility|bool else '/swift/' }}v1"
-swift_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ rgw_port }}{{ '/' if ceph_rgw_compatibility|bool else '/swift/' }}v1"
-swift_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ rgw_port }}{{ '/' if ceph_rgw_compatibility|bool else '/swift/' }}v1"
-
-ceph_rgw_keystone_user: "ceph_rgw"
-
-openstack_swift_auth: "{{ openstack_auth }}"
-openstack_ceph_rgw_auth: "{{ openstack_auth }}"
-
-##########
-# Ceph MDS
-##########
-cephfs_data_pool_name: "cephfs_data"
-cephfs_data_pool_type: "{{ ceph_pool_type }}"
-cephfs_data_pool_cache_mode: "{{ ceph_cache_mode }}"
-cephfs_data_pool_pg_num: "{{ ceph_pool_pg_num }}"
-cephfs_data_pool_pgp_num: "{{ ceph_pool_pgp_num }}"
-cephfs_metadata_pool_name: "cephfs_metadata"
-cephfs_metadata_pool_type: "{{ ceph_pool_type }}"
-cephfs_metadata_pool_cache_mode: "{{ ceph_cache_mode }}"
-cephfs_metadata_pool_pg_num: "{{ ceph_pool_pg_num }}"
-cephfs_metadata_pool_pgp_num: "{{ ceph_pool_pgp_num }}"
-
-####################
-# Kolla
-####################
-kolla_ceph_use_udev: True
-
-
-####################
-# Keystone
-####################
-ceph_rgw_ks_services:
-  - name: "swift"
-    type: "object-store"
-    description: "Openstack Object Storage"
-    endpoints:
-      - {'interface': 'admin', 'url': '{{ swift_admin_endpoint }}'}
-      - {'interface': 'internal', 'url': '{{ swift_internal_endpoint }}'}
-      - {'interface': 'public', 'url': '{{ swift_public_endpoint }}'}
-
-ceph_rgw_ks_users:
-  - project: "service"
-    user: "{{ ceph_rgw_keystone_user }}"
-    password: "{{ ceph_rgw_keystone_password }}"
-    role: "admin"
@@ -1,3 +0,0 @@
----
-dependencies:
-  - { role: common }
@@ -1,53 +0,0 @@
----
-- name: Cleaning up temp file on localhost
-  file:
-    path: /tmp/kolla_ceph_cluster
-    state: absent
-  delegate_to: localhost
-  changed_when: False
-  check_mode: no
-  run_once: True
-
-- name: Creating temp file on localhost
-  copy:
-    content: None
-    dest: /tmp/kolla_ceph_cluster
-    mode: 0644
-  delegate_to: localhost
-  changed_when: False
-  check_mode: no
-  run_once: True
-
-- name: Creating ceph_mon_config volume
-  become: true
-  kolla_docker:
-    action: "create_volume"
-    common_options: "{{ docker_common_options }}"
-    name: "ceph_mon_config"
-  register: ceph_mon_config_volume
-
-- name: Writing hostname of host with existing cluster files to temp file
-  copy:
-    content: "{{ inventory_hostname }}"
-    dest: /tmp/kolla_ceph_cluster
-    mode: 0644
-  delegate_to: localhost
-  changed_when: False
-  check_mode: no
-  when: not ceph_mon_config_volume.changed
-
-- name: Registering host from temp file
-  set_fact:
-    delegate_host: "{{ lookup('file', '/tmp/kolla_ceph_cluster') }}"
-
-- name: Cleaning up temp file on localhost
-  file:
-    path: /tmp/kolla_ceph_cluster
-    state: absent
-  delegate_to: localhost
-  changed_when: False
-  check_mode: no
-  run_once: True
-
-- include_tasks: generate_cluster.yml
-  when: delegate_host == 'None' and inventory_hostname == groups['ceph-mon'][0]
@@ -1,150 +0,0 @@
----
-- name: Looking up disks to bootstrap for Ceph OSDs
-  become: true
-  command: docker exec -t kolla_toolbox sudo -E ansible localhost
-    -m find_disks
-    -a "partition_name={{ partition_name_osd_bootstrap }} match_mode='prefix' use_udev={{ kolla_ceph_use_udev }}"
-  register: osd_lookup
-  changed_when: osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed
-  failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'
-
-- name: Parsing disk info for Ceph OSDs
-  set_fact:
-    osds_bootstrap: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
-
-- name: Looking up disks to bootstrap for Ceph Cache OSDs
-  become: true
-  command: docker exec -t kolla_toolbox sudo -E ansible localhost
-    -m find_disks
-    -a "partition_name={{ partition_name_cache_bootstrap }} match_mode='prefix' use_udev={{ kolla_ceph_use_udev }}"
-  register: osd_cache_lookup
-  changed_when: osd_cache_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed
-  failed_when: osd_cache_lookup.stdout.split()[2] != 'SUCCESS'
-
-- name: Parsing disk info for Ceph Cache OSDs
-  set_fact:
-    osds_cache_bootstrap: "{{ (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
-
-- name: Prompting before wiping existing partitions
-  pause:
-    prompt: |
-      WARNING: It seems {{ item.device }} is marked to be wiped and partitioned for Ceph data and
-      a co-located journal, but appears to contain other existing partitions (>1).
-
-      If you are sure you want this disk to be *wiped* for use with Ceph, press enter.
-
-      Otherwise, press Ctrl-C, then 'A'. (You can disable this check by setting
-      ceph_osd_wipe_disk: 'yes-i-really-really-mean-it' within globals.yml)
-  with_items: "{{ osds_bootstrap|default([]) }}"
-  when:
-    - not item.external_journal | bool
-    - item.device.split('/')[2] in ansible_devices  # if there is no device in setup (like loopback, we don't need to warn user
-    - ansible_devices[item.device.split('/')[2]].partitions|count > 1
-    - ceph_osd_wipe_disk != "yes-i-really-really-mean-it"
-
-- name: Bootstrapping Ceph OSDs
-  become: true
-  kolla_docker:
-    action: "start_container"
-    common_options: "{{ docker_common_options }}"
-    detach: False
-    environment:
-      KOLLA_BOOTSTRAP:
-      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
-      OSD_DEV: "{{ item.1.device }}"
-      OSD_PARTITION: "{{ item.1.partition }}"
-      OSD_PARTITION_NUM: "{{ item.1.partition_num }}"
-      JOURNAL_DEV: "{{ item.1.journal_device }}"
-      JOURNAL_PARTITION: "{{ item.1.journal }}"
-      JOURNAL_PARTITION_NUM: "{{ item.1.journal_num }}"
-      USE_EXTERNAL_JOURNAL: "{{ item.1.external_journal | bool }}"
-      OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
-      OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
-      HOSTNAME: "{{ ceph_osd_hostname }}"
-      OSD_STORETYPE: "{{ ceph_osd_store_type }}"
-      OSD_BS_DEV: "{{ item.1.device | default('') }}"
-      OSD_BS_LABEL: "{{ item.1.partition_label | default('') }}"
-      OSD_BS_PARTNUM: "{{ item.1.partition_num | default('') }}"
-      OSD_BS_BLK_DEV: "{{ item.1.bs_blk_device | default('') }}"
-      OSD_BS_BLK_LABEL: "{{ item.1.bs_blk_label | default('') }}"
-      OSD_BS_BLK_PARTNUM: "{{ item.1.bs_blk_partition_num | default('') }}"
-      OSD_BS_WAL_DEV: "{{ item.1.bs_wal_device | default('') }}"
-      OSD_BS_WAL_LABEL: "{{ item.1.bs_wal_label | default('') }}"
-      OSD_BS_WAL_PARTNUM: "{{ item.1.bs_wal_partition_num | default('') }}"
-      OSD_BS_DB_DEV: "{{ item.1.bs_db_device | default('') }}"
-      OSD_BS_DB_LABEL: "{{ item.1.bs_db_label | default('') }}"
-      OSD_BS_DB_PARTNUM: "{{ item.1.bs_db_partition_num | default('') }}"
-    image: "{{ ceph_osd_image_full }}"
-    labels:
-      BOOTSTRAP:
-    name: "bootstrap_osd_{{ item.0 }}"
-    privileged: True
-    restart_policy: no
-    volumes:
-      - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
-      - "/etc/localtime:/etc/localtime:ro"
-      - "/dev/:/dev/"
-      - "kolla_logs:/var/log/kolla/"
-  with_indexed_items: "{{ osds_bootstrap|default([]) }}"
-
-- name: Prompting before wiping existing partitions
-  pause:
-    prompt: |
-      WARNING: It seems {{ item.device }} is marked to be wiped and partitioned for Ceph data and
-      a co-located journal, but appears to contain other existing partitions (>1).
-
-      If you are sure you want this disk to be *wiped* for use with Ceph, press enter.
-
-      Otherwise, press Ctrl-C, then 'A'. (You can disable this check by setting
-      ceph_osd_wipe_disk: 'yes-i-really-really-mean-it' within globals.yml)
-  with_items: "{{ osds_cache_bootstrap|default([]) }}"
-  when:
-    - not item.external_journal | bool
-    - ansible_devices[item.device.split('/')[2]].partitions|count > 1
-    - ceph_osd_wipe_disk != "yes-i-really-really-mean-it"
-
-- name: Bootstrapping Ceph Cache OSDs
-  become: true
-  kolla_docker:
-    action: "start_container"
-    common_options: "{{ docker_common_options }}"
-    detach: False
-    environment:
-      KOLLA_BOOTSTRAP:
-      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
-      CEPH_CACHE:
-      OSD_DEV: "{{ item.1.device }}"
-      OSD_PARTITION: "{{ item.1.partition }}"
-      OSD_PARTITION_NUM: "{{ item.1.partition_num }}"
-      JOURNAL_DEV: "{{ item.1.journal_device }}"
-      JOURNAL_PARTITION: "{{ item.1.journal }}"
-      JOURNAL_PARTITION_NUM: "{{ item.1.journal_num }}"
-      USE_EXTERNAL_JOURNAL: "{{ item.1.external_journal | bool }}"
-      OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
-      OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
-      HOSTNAME: "{{ ceph_osd_hostname }}"
-      OSD_STORETYPE: "{{ ceph_osd_store_type }}"
-      OSD_BS_DEV: "{{ item.1.device | default('') }}"
-      OSD_BS_LABEL: "{{ item.1.partition_label | default('') }}"
-      OSD_BS_PARTNUM: "{{ item.1.partition_num | default('') }}"
-      OSD_BS_BLK_DEV: "{{ item.1.bs_blk_device | default('') }}"
-      OSD_BS_BLK_LABEL: "{{ item.1.bs_blk_label | default('') }}"
-      OSD_BS_BLK_PARTNUM: "{{ item.1.bs_blk_partition_num | default('') }}"
-      OSD_BS_WAL_DEV: "{{ item.1.bs_wal_device | default('') }}"
-      OSD_BS_WAL_LABEL: "{{ item.1.bs_wal_label|default('') }}"
-      OSD_BS_WAL_PARTNUM: "{{ item.1.bs_wal_partition_num | default('') }}"
-      OSD_BS_DB_DEV: "{{ item.1.bs_db_device | default('') }}"
-      OSD_BS_DB_LABEL: "{{ item.1.bs_db_label | default('') }}"
-      OSD_BS_DB_PARTNUM: "{{ item.1.bs_db_partition_num | default('') }}"
-    image: "{{ ceph_osd_image_full }}"
-    labels:
-      BOOTSTRAP:
-    name: "bootstrap_osd_cache_{{ item.0 }}"
-    privileged: True
-    restart_policy: no
-    volumes:
-      - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
-      - "/etc/localtime:/etc/localtime:ro"
-      - "/dev/:/dev/"
-      - "kolla_logs:/var/log/kolla/"
-  with_indexed_items: "{{ osds_cache_bootstrap|default([]) }}"
@@ -1 +0,0 @@
----
@@ -1,70 +0,0 @@
----
-- name: Ensuring config directories exist
-  file:
-    path: "{{ node_config_directory }}/{{ item }}"
-    state: "directory"
-    owner: "{{ config_owner_user }}"
-    group: "{{ config_owner_group }}"
-    mode: "0770"
-  become: true
-  with_items:
-    - "ceph-mon"
-    - "ceph-osd"
-    - "ceph-rgw"
-    - "ceph-mgr"
-    - "ceph-mds"
-    - "ceph-nfs"
-
-- name: Copying over config.json files for services
-  template:
-    src: "{{ item.name }}.json.j2"
-    dest: "{{ node_config_directory }}/{{ item.name }}/config.json"
-    mode: "0660"
-  become: true
-  when:
-    - inventory_hostname in groups[item.group]
-  with_items:
-    - name: "ceph-mon"
-      group: ceph-mon
-    - name: "ceph-osd"
-      group: ceph-osd
-    - name: "ceph-rgw"
-      group: ceph-rgw
-    - name: "ceph-mgr"
-      group: ceph-mgr
-    - name: "ceph-mds"
-      group: ceph-mds
-    - name: "ceph-nfs"
-      group: ceph-nfs
-
-- name: Copying over ceph.conf
-  vars:
-    service_name: "{{ item }}"
-  merge_configs:
-    sources:
-      - "{{ role_path }}/templates/ceph.conf.j2"
-      - "{{ node_custom_config }}/ceph.conf"
-      - "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf"
-    dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
-    mode: "0660"
-  become: true
-  with_items:
-    - "ceph-mon"
-    - "ceph-osd"
-    - "ceph-rgw"
-    - "ceph-mgr"
-    - "ceph-mds"
-    - "ceph-nfs"
-
-- name: Copying over ganesha.conf for ceph-nfs
-  template:
-    src: "{{ item }}"
-    dest: "{{ node_config_directory }}/ceph-nfs/ganesha.conf"
-    mode: "0600"
-  become: true
-  when:
-    - inventory_hostname in groups['ceph-nfs']
-  with_first_found:
-    - "{{ node_custom_config }}/ganesha.conf"
-    - "{{ node_custom_config }}/ceph-nfs/ganesha.conf"
-    - "ganesha.conf.j2"
@@ -1,62 +0,0 @@
----
-# NOTE(yoctozepto): this file is used during upgrade as well
-
-- include_tasks: config.yml
-
-- include_tasks: bootstrap_mons.yml
-  when: inventory_hostname in groups['ceph-mon']
-
-- include_tasks: distribute_keyrings.yml
-  when: kolla_action != "upgrade"
-
-- include_tasks: start_mons.yml
-  when: inventory_hostname in groups['ceph-mon']
-
-# NOTE(yoctozepto): this ensures caps for admin are always up-to-date (run as earliest as possible = after MONs start)
-# this is retried because the cluster might not be fully operational yet (quorum gathering)
-- name: configuring client.admin caps
-  become: true
-  kolla_ceph_keyring:
-    name: client.admin
-    caps: "{{ ceph_client_admin_keyring_caps }}"
-  run_once: True
-  delegate_to: "{{ groups['ceph-mon'][0] }}"
-  register: result
-  until: result is success
-  retries: 3
-  delay: 15
-
-- include_tasks: start_mgrs.yml
-  when: inventory_hostname in groups['ceph-mgr']
-
-- include_tasks: start_ceph_dashboard.yml
-  when:
-    - enable_ceph_dashboard | bool
-    - inventory_hostname in groups['ceph-mon']
-
-- include_tasks: bootstrap_osds.yml
-  when: inventory_hostname in groups['ceph-osd']
-
-- include_tasks: start_osds.yml
-  when: inventory_hostname in groups['ceph-osd']
-
-- include_tasks: start_rgws.yml
-  when:
-    - enable_ceph_rgw | bool
-    - inventory_hostname in groups['ceph-rgw']
-
-- include_tasks: start_rgw_keystone.yml
-  when:
-    - enable_ceph_rgw_keystone | bool
-    - inventory_hostname in groups['ceph-rgw']
-
-- include_tasks: start_mdss.yml
-  when:
-    - enable_ceph_mds | bool
-    - inventory_hostname in groups['ceph-mds']
-
-# NOTE(yoctozepto): nfs (cephfs-based) depends on mds so start it after
-- include_tasks: start_nfss.yml
-  when:
-    - enable_ceph_nfs | bool
-    - inventory_hostname in groups['ceph-nfs']
@@ -1,79 +0,0 @@
----
-- name: Fetching Ceph keyrings
-  become: true
-  command: docker exec ceph_mon /usr/bin/python{{ distro_python_version }} /usr/bin/fetch_ceph_keys.py
-  register: ceph_files_json
-  changed_when: (ceph_files_json.stdout | from_json).changed
-  failed_when: (ceph_files_json.stdout | from_json).failed
-  delegate_to: "{{ delegate_host }}"
-  run_once: True
-
-- name: Reading json from variable
-  set_fact:
-    ceph_files: "{{ (ceph_files_json.stdout | from_json) }}"
-
-- name: Pushing Ceph keyring for OSDs
-  become: true
-  bslurp:
-    src: "{{ item.content }}"
-    dest: "{{ node_config_directory }}/ceph-osd/{{ item.filename }}"
-    sha1: "{{ item.sha1 | default('') }}"
-    sha256: "{{ item.sha256 | default('') }}"
-    mode: 0600
-  with_items:
-    - "{{ ceph_files['ceph.client.admin.keyring'] }}"
-  when: inventory_hostname in groups['ceph-osd']
-
-- name: Pushing Ceph keyrings for Mons
-  become: true
-  bslurp:
-    src: "{{ item.content }}"
-    dest: "{{ node_config_directory }}/ceph-mon/{{ item.filename }}"
-    sha1: "{{ item.sha1 | default('') }}"
-    sha256: "{{ item.sha256 | default('') }}"
-    mode: 0600
-  with_items:
-    - "{{ ceph_files['ceph.client.admin.keyring'] }}"
-    - "{{ ceph_files['ceph.client.mon.keyring'] }}"
-    - "{{ ceph_files['ceph.client.radosgw.keyring'] }}"
-    - "{{ ceph_files['ceph.monmap'] }}"
-  when: inventory_hostname in groups['ceph-mon']
-
-- name: Pushing Ceph keyrings for Mgrs
-  become: true
-  bslurp:
-    src: "{{ item.content }}"
-    dest: "{{ node_config_directory }}/ceph-mgr/{{ item.filename }}"
-    sha1: "{{ item.sha1 | default('') }}"
-    sha256: "{{ item.sha256 | default('') }}"
-    mode: 0600
-  with_items:
-    - "{{ ceph_files['ceph.client.admin.keyring'] }}"
-  when: inventory_hostname in groups['ceph-mgr']
-
-- name: Pushing Ceph keyrings for RGWs
-  become: true
-  bslurp:
-    src: "{{ item.content }}"
-    dest: "{{ node_config_directory }}/ceph-rgw/{{ item.filename }}"
-    sha1: "{{ item.sha1 | default('') }}"
-    sha256: "{{ item.sha256 | default('') }}"
-    mode: 0600
-  with_items:
-    - "{{ ceph_files['ceph.client.admin.keyring'] }}"
-    - "{{ ceph_files['ceph.client.radosgw.keyring'] }}"
-  when: inventory_hostname in groups['ceph-rgw']
-
-- name: Pushing Ceph keyrings for NFSs
-  become: true
-  bslurp:
-    src: "{{ item.content }}"
-    dest: "{{ node_config_directory }}/ceph-nfs/{{ item.filename }}"
-    sha1: "{{ item.sha1 | default('') }}"
-    sha256: "{{ item.sha256 | default('') }}"
-    mode: 0600
-  with_items:
-    - "{{ ceph_files['ceph.client.admin.keyring'] }}"
-  when:
-    - inventory_hostname in groups['ceph-nfs']
-    - enable_ceph_nfs | bool
@@ -1,29 +0,0 @@
----
-- name: Generating initial Ceph keyrings and monmap
-  become: true
-  kolla_docker:
-    action: "start_container"
-    common_options: "{{ docker_common_options }}"
-    detach: False
-    environment:
-      KOLLA_BOOTSTRAP:
-      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
-      MON_IP: "{{ storage_interface_address }}"
-      HOSTNAME: "{{ ceph_mon_hostname }}"
-    image: "{{ ceph_mon_image_full }}"
-    labels:
-      BOOTSTRAP:
-    name: "bootstrap_ceph_mon"
-    restart_policy: no
-    volumes:
-      - "{{ node_config_directory }}/ceph-mon/:{{ container_config_directory }}/:ro"
-      - "/etc/localtime:/etc/localtime:ro"
-      - "ceph_mon:/var/lib/ceph"
-      - "ceph_mon_config:/etc/ceph"
-      - "kolla_logs:/var/log/kolla/"
-
-- include_tasks: start_mons.yml
-
-- name: Setting host for cluster files
-  set_fact:
-    delegate_host: "{{ inventory_hostname }}"
@@ -1,7 +0,0 @@
----
-- name: "Configure haproxy for {{ project_name }}"
-  import_role:
-    role: haproxy-config
-  vars:
-    project_services: "{{ ceph_services }}"
-  tags: always
@@ -1,14 +0,0 @@
----
-- name: Warn about deprecation
-  debug:
-    msg: >-
-      Support for deploying Ceph via Kolla Ansible is deprecated. In a future
-      release support for deploying Ceph will be removed from Kolla Ansible. Prior
-      to this we will ensure a migration path to another tool such as Ceph
-      Ansible (http://docs.ceph.com/ceph-ansible/master/>) is available. For new
-      deployments it is recommended to use another tool to deploy Ceph to avoid a
-      future migration. This can be integrated with OpenStack by following the
-      external Ceph guide
-      (https://docs.openstack.org/kolla-ansible/latest/reference/storage/external-ceph-guide.html).
-
-- include_tasks: "{{ kolla_action }}.yml"
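The removed deprecation notice above already pointed at the external Ceph guide; in that model the integration is driven by per-service files dropped under node_custom_config rather than by these roles. A hypothetical layout (the /etc/kolla/config base path is an assumption, not taken from this change) could be:

# Assuming node_custom_config = /etc/kolla/config -- adjust to your deployment
# /etc/kolla/config/glance/ceph.conf
# /etc/kolla/config/glance/ceph.client.glance.keyring
# /etc/kolla/config/nova/ceph.conf
# /etc/kolla/config/nova/ceph.client.cinder.keyring   # nova reuses the cinder keyring per ceph_nova_keyring above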
@@ -1,24 +0,0 @@
----
-- name: Get container facts
-  become: true
-  kolla_container_facts:
-    name:
-      - ceph_rgw
-  register: container_facts
-
-- name: Checking free port for RadosGW
-  wait_for:
-    host: "{{ api_interface_address }}"
-    port: "{{ rgw_port }}"
-    connect_timeout: 1
-    timeout: 1
-    state: stopped
-  when:
-    - container_facts['ceph_rgw'] is not defined
-    - inventory_hostname in groups['ceph-rgw']
-
-- name: Check whether the Swift service is enabled
-  fail:
-    msg: 'Ceph-rgw conflicts with Swift, and so you should only enable one of them'
-  when: enable_swift | bool and enable_ceph_rgw | bool
-  run_once: True
@@ -1,53 +0,0 @@
----
-- name: Pulling ceph-mon image
-  become: true
-  kolla_docker:
-    action: "pull_image"
-    common_options: "{{ docker_common_options }}"
-    image: "{{ ceph_mon_image_full }}"
-  when: inventory_hostname in groups['ceph-mon']
-
-- name: Pulling ceph-osd image
-  become: true
-  kolla_docker:
-    action: "pull_image"
-    common_options: "{{ docker_common_options }}"
-    image: "{{ ceph_osd_image_full }}"
-  when: inventory_hostname in groups['ceph-osd']
-
-- name: Pulling ceph-rgw image
-  become: true
-  kolla_docker:
-    action: "pull_image"
-    common_options: "{{ docker_common_options }}"
-    image: "{{ ceph_rgw_image_full }}"
-  when: inventory_hostname in groups['ceph-rgw']
-
-- name: Pulling ceph-mgr image
-  become: true
-  kolla_docker:
-    action: "pull_image"
-    common_options: "{{ docker_common_options }}"
-    image: "{{ ceph_mgr_image_full }}"
-  when:
-    - inventory_hostname in groups['ceph-mgr']
-
-- name: Pulling ceph-mds image
-  become: true
-  kolla_docker:
-    action: "pull_image"
-    common_options: "{{ docker_common_options }}"
-    image: "{{ ceph_mds_image_full }}"
-  when:
-    - enable_ceph_mds | bool
-    - inventory_hostname in groups['ceph-mds']
-
-- name: Pulling ceph-nfs image
-  become: true
-  kolla_docker:
-    action: "pull_image"
-    common_options: "{{ docker_common_options }}"
-    image: "{{ ceph_nfs_image_full }}"
-  when:
-    - enable_ceph_nfs | bool
-    - inventory_hostname in groups['ceph-nfs']
@@ -1,235 +0,0 @@
----
-- name: Ensuring the ceph_mon container is up
-  become: true
-  kolla_docker:
-    name: "{{ item.name }}"
-    action: "get_container_state"
-  register: ceph_mon_container_state
-  failed_when: not ceph_mon_container_state.Running
-  when: inventory_hostname in groups[item.group]
-  with_items:
-    - { name: ceph_mon, group: ceph-mon }
-
-- name: Looking up OSDs for Ceph
-  become: true
-  command: docker exec -t kolla_toolbox sudo -E ansible localhost
-    -m find_disks
-    -a "partition_name={{ partition_name_osd_data }} match_mode='prefix' use_udev={{ kolla_ceph_use_udev }}"
-  register: osd_lookup
-  changed_when: osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed
-  failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'
-  when: inventory_hostname in groups['ceph-osd']
-
-- name: Reading data from variable
-  set_fact:
-    osds: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
-  when: inventory_hostname in groups['ceph-osd']
-
-- name: Gathering OSD IDs
-  command: "cat /var/lib/ceph/osd/{{ item['fs_uuid'] }}/whoami"
-  with_items: "{{ osds|default({}) }}"
-  register: osd_ids
-  changed_when: False
-  failed_when: osd_ids.rc != 0
-  when: inventory_hostname in groups['ceph-osd']
-
-- name: Ensuring the ceph_osd container is up
-  become: true
-  kolla_docker:
-    name: "ceph_osd_{{ item.stdout }}"
-    action: "get_container_state"
-  register: ceph_osd_container_state
-  failed_when: not ceph_osd_container_state.Running
-  when: inventory_hostname in groups['ceph-osd']
-  with_items: "{{ osd_ids.results|default({}) }}"
-
-- name: Ensuring the ceph_rgw container is up
-  become: true
-  kolla_docker:
-    name: "{{ item.name }}"
-    action: "get_container_state"
-  register: ceph_rgw_container_state
-  failed_when: not ceph_rgw_container_state.Running
-  when:
-    - enable_ceph_rgw | bool
-    - inventory_hostname in groups[item.group]
-  with_items:
-    - { name: ceph_rgw, group: ceph-rgw }
-
-- include_tasks: config.yml
-
-- name: Check the configs in ceph_mon container
-  become: true
-  command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
-  changed_when: false
-  failed_when: false
-  register: ceph_mon_check_results
-  when: inventory_hostname in groups[item.group]
-  with_items:
-    - { name: ceph_mon, group: ceph-mon }
-
-- name: Check the configs in the ceph_osd container
-  become: true
-  command: docker exec ceph_osd_{{ item.stdout }} /usr/local/bin/kolla_set_configs --check
-  changed_when: false
-  failed_when: false
-  register: ceph_osd_check_results
-  with_items: "{{ osd_ids.results|default({}) }}"
-  when: inventory_hostname in groups['ceph-osd']
-
-- name: Check the configs in ceph_rgw container
-  become: true
-  command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
-  changed_when: false
-  failed_when: false
-  register: ceph_rgw_check_results
-  when:
-    - enable_ceph_rgw | bool
-    - inventory_hostname in groups[item.group]
-  with_items:
-    - { name: ceph_rgw, group: ceph-rgw }
-
-- name: Containers config strategy for ceph_mon container
-  become: true
-  kolla_docker:
-    name: "{{ item.name }}"
-    action: "get_container_env"
-  register: ceph_mon_container_envs
-  when: inventory_hostname in groups[item.group]
-  with_items:
-    - { name: ceph_mon, group: ceph-mon }
-
-- name: Containers config strategy for the ceph_osd containers
-  become: true
-  kolla_docker:
-    name: "ceph_osd_{{ item.stdout }}"
-    action: "get_container_env"
-  register: ceph_osd_container_envs
-  with_items: "{{ osd_ids.results|default({}) }}"
-  when:
-    - inventory_hostname in groups['ceph-osd']
-    - osds
-
-- name: Containers config strategy for ceph_rgw container
-  become: true
-  kolla_docker:
-    name: "{{ item.name }}"
-    action: "get_container_env"
-  register: ceph_rgw_container_envs
-  when:
-    - enable_ceph_rgw | bool
-    - inventory_hostname in groups[item.group]
-  with_items:
-    - { name: ceph_rgw, group: ceph-rgw }
-
-- name: Remove the ceph_mon container
-  become: true
-  kolla_docker:
-    name: "{{ item[0]['name'] }}"
-    action: "remove_container"
-  register: remove_ceph_mon_container
-  when:
-    - inventory_hostname in groups[item[0]['group']]
-    - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
-    - item[2]['rc'] == 1
-  with_together:
-    - [{ name: ceph_mon, group: ceph-mon }]
-    - "{{ ceph_mon_container_envs.results }}"
-    - "{{ ceph_mon_check_results.results }}"
-
-- name: Remove the ceph_osd containers
-  become: true
-  kolla_docker:
-    name: "ceph_osd_{{ item.0.stdout }}"
-    action: "remove_container"
-  register: remove_ceph_osd_containers
-  when:
-    - inventory_hostname in groups['ceph-osd']
-    - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
-    - item[2]['rc'] == 1
-    - osds
-  with_together:
-    - "{{ osd_ids.results|default({}) }}"
-    - "{{ ceph_osd_container_envs.results }}"
-    - "{{ ceph_osd_check_results.results }}"
-
-- name: Remove the ceph_rgw container
-  become: true
-  kolla_docker:
-    name: "{{ item[0]['name'] }}"
-    action: "remove_container"
-  register: remove_ceph_rgw_container
-  when:
-    - enable_ceph_rgw | bool
-    - inventory_hostname in groups[item[0]['group']]
-    - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
-    - item[2]['rc'] == 1
-  with_together:
-    - [{ name: ceph_rgw, group: ceph-rgw }]
-    - "{{ ceph_rgw_container_envs.results }}"
-    - "{{ ceph_rgw_check_results.results }}"
-
-- include_tasks: start_mons.yml
-  when:
-    - inventory_hostname in groups['ceph-mon']
-    - remove_ceph_mon_container.changed
-
-- include_tasks: start_osds.yml
-  when:
-    - inventory_hostname in groups['ceph-osd']
-    - remove_ceph_osd_containers.changed
-
-- include_tasks: start_rgws.yml
-  when:
-    - inventory_hostname in groups['ceph-rgw']
-    - remove_ceph_rgw_container.changed
-
-- name: Restart the ceph_mon container
-  become: true
-  kolla_docker:
-    name: "ceph_mon"
-    action: "restart_container"
-  when:
-    - inventory_hostname in groups['ceph-mon']
-    - config_strategy == 'COPY_ALWAYS'
-    - item[0]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
-    - item[1]['rc'] == 1
-  with_together:
-    - "{{ ceph_mon_container_envs.results }}"
-    - "{{ ceph_mon_check_results.results }}"
-
-- name: Restart the ceph_osd container
-  become: true
-  kolla_docker:
-    name: "ceph_osd_{{ item.0.stdout }}"
-    action: "restart_container"
-  when:
-    - inventory_hostname in groups['ceph-osd']
-    - config_strategy == 'COPY_ALWAYS'
-    - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
-    - item[2]['rc'] == 1
-    - osds
-  with_together:
-    - "{{ osd_ids.results|default({}) }}"
-    - "{{ ceph_osd_container_envs.results }}"
|
|
||||||
- "{{ ceph_osd_check_results.results }}"
|
|
||||||
|
|
||||||
- name: Restart the ceph_rgw container
|
|
||||||
become: true
|
|
||||||
kolla_docker:
|
|
||||||
name: "ceph_rgw"
|
|
||||||
action: "restart_container"
|
|
||||||
when:
|
|
||||||
- enable_ceph_rgw | bool
|
|
||||||
- inventory_hostname in groups['ceph-rgw']
|
|
||||||
- config_strategy == 'COPY_ALWAYS'
|
|
||||||
- item[0]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
|
|
||||||
- item[1]['rc'] == 1
|
|
||||||
with_together:
|
|
||||||
- "{{ ceph_rgw_container_envs.results }}"
|
|
||||||
- "{{ ceph_rgw_check_results.results }}"
|
|
||||||
|
|
||||||
- include_tasks: start_rgw_keystone.yml
|
|
||||||
when:
|
|
||||||
- ceph_rgw_container_envs.results
|
|
||||||
- enable_ceph_rgw_keystone | bool
|
|
@ -1,6 +0,0 @@
---
- name: Enable ceph dashboard
become: true
command: docker exec ceph_mon ceph mgr module enable dashboard --force
changed_when: false
run_once: true
@ -1,63 +0,0 @@
---
- include_tasks: ../../ceph_pools.yml
vars:
pool_name: "{{ item.pool_name }}"
pool_type: "{{ item.pool_type }}"
cache_mode: "{{ item.cache_mode }}"
pool_pg_num: "{{ item.pool_pg_num }}"
pool_pgp_num: "{{ item.pool_pgp_num }}"
pool_application: "cephfs"
with_items:
- pool_name: "{{ cephfs_data_pool_name }}"
pool_type: "{{ cephfs_data_pool_type }}"
cache_mode: "{{ cephfs_data_pool_cache_mode }}"
pool_pg_num: "{{ cephfs_data_pool_pg_num }}"
pool_pgp_num: "{{ cephfs_data_pool_pgp_num }}"
- pool_name: "{{ cephfs_metadata_pool_name }}"
pool_type: "{{ cephfs_metadata_pool_type }}"
cache_mode: "{{ cephfs_metadata_pool_cache_mode }}"
pool_pg_num: "{{ cephfs_metadata_pool_pg_num }}"
pool_pgp_num: "{{ cephfs_metadata_pool_pgp_num }}"

- name: Geting ceph mds keyring
become: true
kolla_ceph_keyring:
name: "mds.{{ ceph_mds_hostname }}"
caps: "{{ ceph_client_mds_keyring_caps }}"
register: ceph_mds_auth
delegate_to: "{{ groups['ceph-mon'][0] }}"

- name: Pushing ceph mds keyring to ceph-mds
become: true
copy:
content: |
[mds.{{ ceph_mds_hostname }}]
key = {{ ceph_mds_auth.keyring.key }}
dest: "{{ node_config_directory }}/ceph-mds/ceph.mds.{{ ceph_mds_hostname }}.keyring"
mode: "0600"

- name: Starting ceph-mds container
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ ceph_mds_image_full }}"
name: "ceph_mds"
volumes:
- "{{ node_config_directory }}/ceph-mds/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"

- name: Checking whether cephfs is created
become: true
command: docker exec ceph_mon ceph fs get cephfs
register: cephfs_stat
failed_when: false
changed_when: false
run_once: true

- name: Creating ceph new filesystem
become: true
command: docker exec ceph_mon ceph fs new cephfs cephfs_metadata cephfs_data
run_once: true
when: cephfs_stat.rc != 0
@ -1,36 +0,0 @@
---
- name: Getting ceph mgr keyring
become: true
kolla_ceph_keyring:
name: "mgr.{{ ceph_mgr_hostname }}"
caps: "{{ ceph_client_mgr_keyring_caps }}"
register: ceph_mgr_keyring
delegate_to: "{{ groups['ceph-mon'][0] }}"

- name: Pushing ceph mgr keyring to ceph-mgr
become: true
copy:
content: |
[mgr.{{ ceph_mgr_hostname }}]
key = {{ ceph_mgr_keyring.keyring.key }}
dest: "{{ node_config_directory }}/ceph-mgr/ceph.mgr.{{ ceph_mgr_hostname }}.keyring"
mode: "0600"

- name: Starting ceph-mgr container
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ ceph_mgr_image_full }}"
name: "ceph_mgr"
volumes:
- "{{ node_config_directory }}/ceph-mgr/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla"

- name: Enable the ceph mgr prometheus module
become: true
command: docker exec ceph_mgr ceph mgr module enable prometheus
when:
- inventory_hostname in groups['ceph-mgr']
- enable_prometheus_ceph_mgr_exporter | bool
@ -1,17 +0,0 @@
---
- name: Starting ceph-mon container
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
HOSTNAME: "{{ ceph_mon_hostname }}"
image: "{{ ceph_mon_image_full }}"
name: "ceph_mon"
volumes:
- "{{ node_config_directory }}/ceph-mon/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "ceph_mon:/var/lib/ceph"
- "ceph_mon_config:/etc/ceph"
- "kolla_logs:/var/log/kolla/"
@ -1,12 +0,0 @@
---
- name: Starting ceph-nfs container
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ ceph_nfs_image_full }}"
name: "ceph_nfs"
volumes:
- "{{ node_config_directory }}/ceph-nfs/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
@ -1,59 +0,0 @@
---
- name: Looking up OSDs for Ceph
become: true
command: docker exec -t kolla_toolbox sudo -E ansible localhost
-m find_disks
-a "partition_name={{ partition_name_osd_data }} match_mode='prefix' use_udev={{ kolla_ceph_use_udev }}"
register: osd_lookup
changed_when: osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed
failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'

- name: Parsing disk info for Ceph OSDs
set_fact:
osds: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"

- name: Mounting Ceph OSD volumes
become: true
mount:
src: "UUID={{ item.fs_uuid }}"
fstype: "{{ ceph_osd_filesystem }}"
state: mounted
name: "/var/lib/ceph/osd/{{ item['fs_uuid'] }}"
opts: "{{ ceph_osd_mount_options }}"
with_items: "{{ osds }}"
become_method: sudo

- name: Gathering OSD IDs
become: true
command: "cat /var/lib/ceph/osd/{{ item['fs_uuid'] }}/whoami"
with_items: "{{ osds }}"
register: id
changed_when: False
failed_when: id.rc != 0

- name: Starting ceph-osd container
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
OSD_ID: "{{ item.0.stdout }}"
JOURNAL_PARTITION: "{{ item.1.journal }}"
TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES: "{{ ceph_tcmalloc_tc_bytes }}"
OSD_STORETYPE: "{{ ceph_osd_store_type }}"
OSD_BS_FSUUID: "{{ item.1['fs_uuid'] }}"
image: "{{ ceph_osd_image_full }}"
name: "ceph_osd_{{ item.0.stdout }}"
pid_mode: "host"
privileged: True
volumes:
- "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/dev/:/dev/"
- "/var/lib/ceph/osd/{{ item.1['fs_uuid'] }}:/var/lib/ceph/osd/ceph-{{ item.0.stdout }}"
- "kolla_logs:/var/log/kolla/"
with_together:
- "{{ id.results }}"
- "{{ osds }}"
when: osds
@ -1,20 +0,0 @@
---
- import_role:
name: service-ks-register
vars:
service_ks_register_auth: "{{ openstack_ceph_rgw_auth }}"
service_ks_register_services: "{{ ceph_rgw_ks_services }}"
service_ks_register_users: "{{ ceph_rgw_ks_users }}"
tags: always

- name: Creating the ResellerAdmin role
become: true
kolla_toolbox:
module_name: "os_keystone_role"
module_args:
name: "ResellerAdmin"
region_name: "{{ openstack_region_name }}"
auth: "{{ openstack_ceph_rgw_auth }}"
endpoint_type: "{{ openstack_interface }}"
cacert: "{{ openstack_cacert }}"
run_once: True
@ -1,12 +0,0 @@
---
- name: Starting ceph-rgw container
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ ceph_rgw_image_full }}"
name: "ceph_rgw"
volumes:
- "{{ node_config_directory }}/ceph-rgw/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
@ -1,70 +0,0 @@
---
- name: Stopping ceph-mon container
become: true
kolla_docker:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "ceph_mon"
when:
- inventory_hostname in groups['ceph-mon']
- "'ceph_mon' not in skip_stop_containers"

- name: Find running ceph-osds containers
become: true
command: "docker ps --filter name=ceph_osd_ --format '{% raw %}{{ .Names }}{% endraw %}'"
register: ceph_osd_containers

- name: Stopping ceph-osd container
become: true
kolla_docker:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "{{ item }}"
with_items: "{{ ceph_osd_containers.stdout_lines }}"
when:
- inventory_hostname in groups['ceph-osd']
- ceph_osd_containers.stdout_lines | length >= 1
- item not in skip_stop_containers

- name: Stopping ceph-rgw container
become: true
kolla_docker:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "ceph_rgw"
when:
- inventory_hostname in groups['ceph-rgw']
- enable_ceph_rgw | bool
- "'ceph_rgw' not in skip_stop_containers"

- name: Stopping ceph-mgr container
become: true
kolla_docker:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "ceph_mgr"
when:
- inventory_hostname in groups['ceph-mgr']
- "'ceph_mgr' not in skip_stop_containers"

- name: Stopping ceph-mds container
become: true
kolla_docker:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "ceph_mds"
when:
- enable_ceph_mds | bool
- inventory_hostname in groups['ceph-mds']
- "'ceph_mds' not in skip_stop_containers"

- name: Stopping ceph-nfs container
become: true
kolla_docker:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "ceph_nfs"
when:
- enable_ceph_nfs | bool
- inventory_hostname in groups['ceph-nfs']
- "'ceph_nfs' not in skip_stop_containers"
@ -1,17 +0,0 @@
---
- include_tasks: deploy.yml

- name: Check final release (as running on MONs)
become: true
command: "docker exec ceph_mon ceph -m {{ 'storage' | kolla_address }} versions"
changed_when: false
register: ceph_release_command
delegate_to: "{{ groups['ceph-mon'][0] }}"
run_once: true

- name: Finalize the upgrade by disallowing older OSDs
become: true
command: "docker exec ceph_mon ceph -m {{ 'storage' | kolla_address }} osd require-osd-release {{ ((ceph_release_command.stdout|from_json).mon | string).split(' ')[4] }}"
changed_when: false
delegate_to: "{{ groups['ceph-mon'][0] }}"
run_once: true
@ -1,17 +0,0 @@
{
"command": "/usr/bin/ceph-mds -f -c /etc/ceph/ceph.conf -i {{ ceph_mds_hostname }}",
"config_files": [
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.mds.{{ ceph_mds_hostname }}.keyring",
"dest": "/var/lib/ceph/mds/ceph-{{ ceph_mds_hostname }}/keyring",
"owner": "root",
"perm": "0644"
}
]
}
@ -1,23 +0,0 @@
{
"command": "/usr/bin/ceph-mgr -f -i {{ ceph_mgr_hostname }}",
"config_files": [
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.mgr.{{ ceph_mgr_hostname }}.keyring",
"dest": "/var/lib/ceph/mgr/ceph-{{ ceph_mgr_hostname }}/keyring",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.client.admin.keyring",
"dest": "/etc/ceph/ceph.client.admin.keyring",
"owner": "ceph",
"perm": "0600"
}
]
}
@ -1,39 +0,0 @@
{
"command": "/usr/bin/ceph-mon -f {% if ceph_debug %}-d{% endif %} -i {{ ceph_mon_hostname }} --public-addr {{ storage_interface_address | put_address_in_context('url') }}:6789",
"config_files": [
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.client.admin.keyring",
"dest": "/etc/ceph/ceph.client.admin.keyring",
"owner": "ceph",
"perm": "0600",
"optional": true
},
{
"source": "{{ container_config_directory }}/ceph.client.mon.keyring",
"dest": "/etc/ceph/ceph.client.mon.keyring",
"owner": "ceph",
"perm": "0600",
"optional": true
},
{
"source": "{{ container_config_directory }}/ceph.client.radosgw.keyring",
"dest": "/etc/ceph/ceph.client.radosgw.keyring",
"owner": "ceph",
"perm": "0600",
"optional": true
},
{
"source": "{{ container_config_directory }}/ceph.monmap",
"dest": "/etc/ceph/ceph.monmap",
"owner": "ceph",
"perm": "0600",
"optional": true
}
]
}
@ -1,24 +0,0 @@
{
"command": "/usr/bin/ganesha.nfsd -F -f /etc/ganesha/ganesha.conf",
"config_files": [
{
"source": "{{ container_config_directory }}/ganesha.conf",
"dest": "/etc/ganesha/ganesha.conf",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.client.admin.keyring",
"dest": "/etc/ceph/ceph.client.admin.keyring",
"owner": "ceph",
"perm": "0600",
"optional": true
}
]
}
@ -1,17 +0,0 @@
{
"command": "/usr/bin/ceph-osd -f {% if ceph_debug %}-d{% endif %} --public-addr {{ storage_interface_address }} --cluster-addr {{ 'cluster' | kolla_address }}",
"config_files": [
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.client.admin.keyring",
"dest": "/etc/ceph/ceph.client.admin.keyring",
"owner": "ceph",
"perm": "0600"
}
]
}
@ -1,23 +0,0 @@
{
"command": "/usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway -f",
"config_files": [
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.client.admin.keyring",
"dest": "/etc/ceph/ceph.client.admin.keyring",
"owner": "ceph",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.client.radosgw.keyring",
"dest": "/etc/ceph/ceph.client.radosgw.keyring",
"owner": "ceph",
"perm": "0600"
}
]
}
@ -1,70 +0,0 @@
[global]
log file = /var/log/kolla/ceph/$cluster-$name.log
log to syslog = false
err to syslog = false
log to stderr = false
err to stderr = false

fsid = {{ ceph_cluster_fsid }}
{% if ceph_mon_host_type == 'HOSTNAME' %}
mon initial members = {% for host in groups['ceph-mon'] %}{{ hostvars[host]['ansible_hostname'] }}{% if not loop.last %}, {% endif %}{% endfor %}
{% elif ceph_mon_host_type == 'FQDN' %}
mon initial members = {% for host in groups['ceph-mon'] %}{{ hostvars[host]['ansible_fqdn'] }}{% if not loop.last %}, {% endif %}{% endfor %}
{% elif ceph_mon_host_type == 'INVENTORY' %}
mon initial members = {% for host in groups['ceph-mon'] %}{{ host }}{% if not loop.last %}, {% endif %}{% endfor %}
{%- else %}
mon initial members = {% for host in groups['ceph-mon'] %}{{ 'storage' | kolla_address(host) }}{% if not loop.last %}, {% endif %}{% endfor %}
{% endif %}

mon host = {% for host in groups['ceph-mon'] %}{{ 'storage' | kolla_address(host) }}{% if not loop.last %}, {% endif %}{% endfor %}

mon addr = {% for host in groups['ceph-mon'] %}{{ 'storage' | kolla_address(host) | put_address_in_context('url') }}:6789{% if not loop.last %}, {% endif %}{% endfor %}

{% if storage_address_family == 'ipv6' %}
ms bind ipv6 = true
{% endif %}

auth cluster required = cephx
auth service required = cephx
auth client required = cephx

# NOTE(inc0): This line will mean that if ceph was upgraded, it will run as root
# until contents of /var/lib/ceph are chowned to ceph user.
# This change was introduced in Jewel version and we should include
# chown operation in upgrade procedure. https://bugs.launchpad.net/kolla/+bug/1620702
setuser match path = /var/lib/ceph/$type/$cluster-$id

# NOTE(Jeffrey4l): kolla is using node ip as host bucket name, whereas `osd
# crush update on start` feature depends on hostname. Disable this feature for
# less confusion and upgrade impact.
osd crush update on start = false

[mon]
# NOTE(SamYaple): The monitor files have been known to grow very large. The
# only fix for that is to compact the files.
mon compact on start = true
mon cluster log file = /var/log/kolla/ceph/$cluster.log

{% if service_name is defined and service_name == 'ceph-rgw' %}
[client.radosgw.gateway]
host = {{ 'storage' | kolla_address }}
rgw frontends = civetweb port={{ api_interface_address | put_address_in_context('url') }}:{{ rgw_port }}
{% if enable_ceph_rgw_keystone | bool %}
rgw_keystone_url = {{ keystone_admin_url }}
rgw_keystone_admin_user = {{ ceph_rgw_keystone_user }}
rgw_keystone_admin_password = {{ ceph_rgw_keystone_password }}
rgw_keystone_admin_project = service
rgw_keystone_admin_domain = default
rgw_keystone_api_version = 3
rgw_keystone_accepted_roles = admin, {{ keystone_default_user_role }}
rgw_keystone_accepted_admin_roles = ResellerAdmin
rgw_swift_versioning_enabled = true
{% endif %}
keyring = /etc/ceph/ceph.client.radosgw.keyring
log file = /var/log/kolla/ceph/client.radosgw.gateway.log
{% if ceph_rgw_compatibility | bool %}
rgw_swift_url_prefix = "/"
rgw_enable_apis = swift,swift_auth,admin
rgw_swift_enforce_content_length = true
{% endif %}
{% endif %}
@ -1,37 +0,0 @@
NFS_CORE_PARAM {
Protocols = 4;
Enable_NLM = false;
Enable_RQUOTA = false;
Enable_UDP = false;
}

NFS_KRB5 {
Active_krb5 = false;
}

EXPORT
{
Export_id=1;

Path = "/";

Pseudo = /cephfs;

Access_Type = RW;

Protocols = 4;

Transports = TCP;

FSAL {
Name = CEPH;
}
}

LOG {
Facility {
name = FILE;
destination = "/var/log/kolla/ceph/ceph-nfs.log";
enable = active;
}
}
@ -1,117 +0,0 @@
---
# TODO(SamYaple): Improve failed_when and changed_when tests if possible
- name: Creating ceph erasure profile
become: true
command: docker exec ceph_mon ceph osd erasure-code-profile set erasure-profile {{ ceph_erasure_profile }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: pool_type == "erasure"

- name: Creating ceph ruleset
become: true
command: docker exec ceph_mon ceph osd crush rule create-erasure disks erasure-profile
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: pool_type == "erasure"

- name: Creating ceph ruleset
become: true
command: docker exec ceph_mon ceph osd crush rule create-simple disks {{ ceph_rule }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: pool_type == "replicated"

- name: Creating ceph pool
become: true
command: docker exec ceph_mon ceph osd pool create {{ pool_name }} {{ pool_pg_num }} {{ pool_pgp_num }} {{ pool_type }} {{ 'erasure-profile' if pool_type == 'erasure' else '' }} disks
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
run_once: True

- name: enable application for ceph pool
become: true
command: docker exec ceph_mon ceph osd pool application enable {{ pool_name }} {{ pool_application }}
changed_when: False
delegate_to: "{{ groups['ceph-mon'][0] }}"
run_once: True

- name: Creating ceph ruleset for cache
become: true
command: docker exec ceph_mon ceph osd crush rule create-simple cache {{ ceph_cache_rule }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: ceph_enable_cache | bool

- name: Creating ceph pool for cache
become: true
command: docker exec ceph_mon ceph osd pool create {{ pool_name }}-cache 128 128 replicated cache
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
run_once: True
when: ceph_enable_cache | bool

- name: Adding cache to pool
become: true
command: docker exec ceph_mon ceph osd tier add {{ pool_name }} {{ pool_name }}-cache
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: ceph_enable_cache | bool

- name: Setting cache-mode
become: true
command: docker exec ceph_mon ceph osd tier cache-mode {{ pool_name }}-cache {{ cache_mode }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: ceph_enable_cache | bool

- name: Setting cache overlay for pool
become: true
command: docker exec ceph_mon ceph osd tier set-overlay {{ pool_name }} {{ pool_name }}-cache
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: ceph_enable_cache | bool

- name: Setting cache hit_set_type
become: true
command: docker exec ceph_mon ceph osd pool set {{ pool_name }}-cache hit_set_type bloom
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: ceph_enable_cache | bool

- name: Setting cache target_max_bytes
become: true
command: docker exec ceph_mon ceph osd pool set {{ pool_name }}-cache target_max_bytes {{ ceph_target_max_bytes }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when:
- ceph_enable_cache | bool
- ceph_target_max_bytes != ''

- name: Setting cache target_max_objects
become: true
command: docker exec ceph_mon ceph osd pool set {{ pool_name }}-cache target_max_objects {{ ceph_target_max_objects }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when:
- ceph_enable_cache | bool
- ceph_target_max_objects != ''
@ -47,45 +47,6 @@ cinder_services:
volumes: "{{ cinder_backup_default_volumes + cinder_backup_extra_volumes }}"
dimensions: "{{ cinder_backup_dimensions }}"

####################
# Ceph
####################
ceph_cinder_pool_type: "{{ ceph_pool_type }}"
ceph_cinder_cache_mode: "{{ ceph_cache_mode }}"
ceph_cinder_backup_pool_type: "{{ ceph_pool_type }}"
ceph_cinder_backup_cache_mode: "{{ ceph_cache_mode }}"

# Due to Ansible issues on include, you cannot override these variables. Please
# override the variables they reference instead.
cinder_pool_name: "{{ ceph_cinder_pool_name }}"
cinder_pool_type: "{{ ceph_cinder_pool_type }}"
cinder_cache_mode: "{{ ceph_cinder_cache_mode }}"
cinder_pool_pg_num: "{{ ceph_pool_pg_num }}"
cinder_pool_pgp_num: "{{ ceph_pool_pgp_num }}"

cinder_backup_pool_name: "{{ ceph_cinder_backup_pool_name }}"
cinder_backup_pool_type: "{{ ceph_cinder_backup_pool_type }}"
cinder_backup_cache_mode: "{{ ceph_cinder_backup_cache_mode }}"
cinder_backup_pool_pg_num: "{{ ceph_pool_pg_num }}"
cinder_backup_pool_pgp_num: "{{ ceph_pool_pgp_num }}"

ceph_client_cinder_keyring_caps:
mon: 'profile rbd'
osd: >-
profile rbd pool={{ ceph_cinder_pool_name }},
profile rbd pool={{ ceph_nova_pool_name }},
profile rbd pool={{ ceph_glance_pool_name }},
profile rbd pool={{ ceph_cinder_pool_name }}-cache,
profile rbd pool={{ ceph_nova_pool_name }}-cache,
profile rbd pool={{ ceph_glance_pool_name }}-cache

ceph_client_cinder_backup_keyring_caps:
mon: 'profile rbd'
osd: >-
profile rbd pool={{ ceph_cinder_backup_pool_name }},
profile rbd pool={{ ceph_cinder_backup_pool_name }}-cache

####################
# Database
####################
@ -1,75 +0,0 @@
---
- name: Copying over ceph.conf(s)
vars:
services_need_config:
- "cinder-volume"
- "cinder-backup"
merge_configs:
sources:
- "{{ role_path }}/../ceph/templates/ceph.conf.j2"
- "{{ node_custom_config }}/ceph.conf"
- "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/ceph.conf"
mode: "0660"
become: true
when:
- item.value.enabled | bool
- inventory_hostname in groups[item.value.group]
- item.key in services_need_config
with_dict: "{{ cinder_services }}"
notify:
- Restart {{ item.key }} container

- include_tasks: ../../ceph_pools.yml
vars:
pool_name: "{{ cinder_pool_name }}"
pool_type: "{{ cinder_pool_type }}"
cache_mode: "{{ cinder_cache_mode }}"
pool_pg_num: "{{ cinder_pool_pg_num }}"
pool_pgp_num: "{{ cinder_pool_pgp_num }}"
pool_application: "rbd"

- include_tasks: ../../ceph_pools.yml
vars:
pool_name: "{{ cinder_backup_pool_name }}"
pool_type: "{{ cinder_backup_pool_type }}"
cache_mode: "{{ cinder_backup_cache_mode }}"
pool_pg_num: "{{ cinder_backup_pool_pg_num }}"
pool_pgp_num: "{{ cinder_backup_pool_pgp_num }}"
pool_application: "rbd"

- name: Pulling cephx keyring for cinder
become: true
kolla_ceph_keyring:
name: client.cinder
caps: "{{ ceph_client_cinder_keyring_caps }}"
register: cephx_key_cinder
delegate_to: "{{ groups['ceph-mon'][0] }}"
run_once: True

- name: Pulling cephx keyring for cinder-backup
become: true
kolla_ceph_keyring:
name: client.cinder-backup
caps: "{{ ceph_client_cinder_backup_keyring_caps }}"
register: cephx_key_cinder_backup
delegate_to: "{{ groups['ceph-mon'][0] }}"
run_once: True

- name: Pushing cephx keyring
copy:
content: |
[client.{{ item.key_name }}]
key = {{ item.key }}
dest: "{{ node_config_directory }}/{{ item.service_name }}/ceph.client.{{ item.key_name }}.keyring"
mode: "0600"
become: true
with_items:
- { service_name: "cinder-volume", key_name: "cinder", key: "{{ cephx_key_cinder.keyring.key }}" }
- { service_name: "cinder-backup", key_name: "cinder", key: "{{ cephx_key_cinder.keyring.key }}" }
- { service_name: "cinder-backup", key_name: "cinder-backup", key: "{{ cephx_key_cinder_backup.keyring.key }}" }
when:
- inventory_hostname in groups[item.service_name]
- cinder_services[item.service_name].enabled | bool
notify:
- "Restart {{ item.service_name }} container"
@ -12,18 +12,9 @@
- item.value.enabled | bool
with_dict: "{{ cinder_services }}"

- include_tasks: ceph.yml
when:
- (enable_ceph | bool) and (cinder_backend_ceph | bool)
- inventory_hostname in groups['ceph-mon'] or
inventory_hostname in groups['cinder-api'] or
inventory_hostname in groups['cinder-volume'] or
inventory_hostname in groups['cinder-scheduler'] or
inventory_hostname in groups['cinder-backup']

- include_tasks: external_ceph.yml
when:
- (not enable_ceph | bool) and (cinder_backend_ceph | bool)
- cinder_backend_ceph | bool
- inventory_hostname in groups['cinder-volume'] or
inventory_hostname in groups['cinder-backup']
@ -3,8 +3,7 @@
when: inventory_hostname in groups['cinder-api']

- include_tasks: config.yml
when: inventory_hostname in groups['ceph-mon'] or
when: inventory_hostname in groups['cinder-api'] or
inventory_hostname in groups['cinder-api'] or
inventory_hostname in groups['cinder-volume'] or
inventory_hostname in groups['cinder-scheduler'] or
inventory_hostname in groups['cinder-backup']
@ -290,7 +290,6 @@
- { name: "barbican", enabled: "{{ enable_barbican }}" }
- { name: "blazar", enabled: "{{ enable_blazar }}" }
- { name: "ceilometer", enabled: "{{ enable_ceilometer }}" }
- { name: "ceph", enabled: "{{ enable_ceph }}" }
- { name: "chrony", enabled: "{{ enable_chrony }}" }
- { name: "cinder", enabled: "{{ enable_cinder }}" }
- { name: "cloudkitty", enabled: "{{ enable_cloudkitty }}" }
@ -1,7 +0,0 @@
"/var/log/kolla/ceph/*.log"
{
create 644 root root
postrotate
chmod 644 /var/log/kolla/ceph/*.log
endscript
}
@ -5,7 +5,6 @@
( 'barbican', enable_barbican ),
( 'blazar', enable_blazar ),
( 'ceilometer', enable_ceilometer ),
( 'ceph', enable_ceph ),
( 'chrony', enable_chrony ),
( 'cinder', enable_cinder ),
( 'cloudkitty', enable_cloudkitty ),
@ -67,27 +67,6 @@ glance_notification_topics:
glance_enabled_notification_topics: "{{ glance_notification_topics | selectattr('enabled', 'equalto', true) | list }}"

####################
# Ceph
####################
ceph_glance_pool_type: "{{ ceph_pool_type }}"
ceph_glance_cache_mode: "{{ ceph_cache_mode }}"

# Due to Ansible issues on include, you cannot override these variables. Please
# override the variables they reference instead.
glance_pool_name: "{{ ceph_glance_pool_name }}"
glance_pool_type: "{{ ceph_glance_pool_type }}"
glance_cache_mode: "{{ ceph_glance_cache_mode }}"
glance_pool_pg_num: "{{ ceph_pool_pg_num }}"
glance_pool_pgp_num: "{{ ceph_pool_pgp_num }}"

ceph_client_glance_keyring_caps:
mon: 'profile rbd'
osd: >-
profile rbd pool={{ ceph_glance_pool_name }},
profile rbd pool={{ ceph_glance_pool_name }}-cache

####################
# Database
####################
@ -1,60 +0,0 @@
---
- name: Copying over ceph.conf(s)
merge_configs:
sources:
- "{{ role_path }}/../ceph/templates/ceph.conf.j2"
- "{{ node_custom_config }}/ceph.conf"
- "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf"
dest: "{{ node_config_directory }}/glance-api/ceph.conf"
mode: "0660"
become: true
when:
- glance_services['glance-api'].host_in_groups | bool
- glance_services['glance-api'].enabled | bool
notify:
- Restart glance-api container

- include_tasks: ../../ceph_pools.yml
vars:
pool_name: "{{ glance_pool_name }}"
pool_type: "{{ glance_pool_type }}"
cache_mode: "{{ glance_cache_mode }}"
pool_pg_num: "{{ glance_pool_pg_num }}"
pool_pgp_num: "{{ glance_pool_pgp_num }}"
pool_application: "rbd"

- name: Pulling cephx keyring
become: true
kolla_ceph_keyring:
name: client.glance
caps: "{{ ceph_client_glance_keyring_caps }}"
register: cephx_key
delegate_to: "{{ groups['ceph-mon'][0] }}"
run_once: True

- name: Pushing cephx keyring
copy:
content: |
[client.glance]
key = {{ cephx_key.keyring.key }}
dest: "{{ node_config_directory }}/glance-api/ceph.client.glance.keyring"
mode: "0600"
become: true
when:
- glance_services['glance-api'].host_in_groups | bool
- glance_services['glance-api'].enabled | bool
notify:
- Restart glance-api container

- name: Ensuring config directory has correct owner and permission
become: true
file:
path: "{{ node_config_directory }}/{{ item }}"
recurse: yes
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
when:
- glance_services[item].host_in_groups | bool
- glance_services[item].enabled | bool
with_items:
- "glance-api"
@ -12,14 +12,8 @@
- item.value.enabled | bool
with_dict: "{{ glance_services }}"

- include_tasks: ceph.yml
when:
- enable_ceph | bool
- glance_backend_ceph | bool

- include_tasks: external_ceph.yml
when:
- not enable_ceph | bool
- glance_backend_ceph | bool

- name: Check if policies shall be overwritten
@ -36,27 +36,6 @@ gnocchi_services:
dimensions: "{{ gnocchi_statsd_dimensions }}"

####################
# Ceph
####################
ceph_gnocchi_pool_type: "{{ ceph_pool_type }}"
ceph_gnocchi_cache_mode: "{{ ceph_cache_mode }}"

# Due to Ansible issues on include, you cannot override these variables. Please
# override the variables they reference instead.
gnocchi_pool_name: "{{ ceph_gnocchi_pool_name }}"
gnocchi_pool_type: "{{ ceph_gnocchi_pool_type }}"
gnocchi_cache_mode: "{{ ceph_gnocchi_cache_mode }}"
gnocchi_pool_pg_num: "{{ ceph_pool_pg_num }}"
gnocchi_pool_pgp_num: "{{ ceph_pool_pgp_num }}"

ceph_client_gnocchi_keyring_caps:
mon: 'profile rbd'
osd: >-
profile rbd pool={{ ceph_gnocchi_pool_name }},
profile rbd pool={{ ceph_gnocchi_pool_name }}-cache

####################
# Swift
####################
@ -1,51 +0,0 @@
---
- name: Copying over ceph.conf(s)
merge_configs:
sources:
- "{{ role_path }}/../ceph/templates/ceph.conf.j2"
- "{{ node_custom_config }}/ceph.conf"
- "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf"
dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
mode: "0660"
become: true
when: inventory_hostname in groups[item]
with_items:
- "gnocchi-api"
- "gnocchi-metricd"
- "gnocchi-statsd"
notify:
- Restart {{ item }} container

- include_tasks: ../../ceph_pools.yml
vars:
pool_name: "{{ gnocchi_pool_name }}"
pool_type: "{{ gnocchi_pool_type }}"
cache_mode: "{{ gnocchi_cache_mode }}"
pool_pg_num: "{{ gnocchi_pool_pg_num }}"
pool_pgp_num: "{{ gnocchi_pool_pgp_num }}"
pool_application: "rgw"

- name: Pulling cephx keyring
become: true
kolla_ceph_keyring:
name: client.gnocchi
caps: "{{ ceph_client_gnocchi_keyring_caps }}"
register: cephx_key
delegate_to: "{{ groups['ceph-mon'][0] }}"
run_once: True

- name: Pushing cephx keyring
copy:
content: |
[client.gnocchi]
key = {{ cephx_key.keyring.key }}
dest: "{{ node_config_directory }}/{{ item }}/ceph.client.gnocchi.keyring"
mode: "0600"
become: true
when: inventory_hostname in groups[item]
with_items:
- "gnocchi-api"
- "gnocchi-metricd"
- "gnocchi-statsd"
notify:
- Restart {{ item }} container
@ -12,14 +12,8 @@
- item.value.enabled | bool
with_dict: "{{ gnocchi_services }}"

- include_tasks: ceph.yml
when:
- enable_ceph | bool
- gnocchi_backend_storage == 'ceph'

- include_tasks: external_ceph.yml
when:
- not enable_ceph | bool
- gnocchi_backend_storage == 'ceph'

- name: Check if policies shall be overwritten
@ -858,19 +858,6 @@
- haproxy_stat.find('outward_rabbitmq_management') == -1
- "host_running_haproxy == 'None'"

- name: Checking free port for RadosGW HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ rgw_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_ceph_rgw | bool
- inventory_hostname in groups['haproxy']
- haproxy_stat.find('radosgw') == -1
- "host_running_haproxy == 'None'"

- name: Checking free port for Sahara API HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
@ -45,21 +45,6 @@ manila_services:
dimensions: "{{ manila_data_dimensions }}"

#####################
## Ceph
#####################

ceph_client_manila_keyring_caps:
mon: >-
allow r,
allow command "auth del",
allow command "auth caps",
allow command "auth get",
allow command "auth get-or-create"
osd: 'allow rw'
mds: 'allow *'

#####################
## Database
#####################
@ -1,32 +0,0 @@
---
- name: Copying over ceph.conf for manila
merge_configs:
sources:
- "{{ role_path }}/../ceph/templates/ceph.conf.j2"
- "{{ node_custom_config }}/ceph.conf"
- "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf"
dest: "{{ node_config_directory }}/manila-share/ceph.conf"
mode: "0660"
become: true
notify:
- Restart manila-share container

- name: Pulling cephx keyring for manila
become: true
kolla_ceph_keyring:
name: client.manila
caps: "{{ ceph_client_manila_keyring_caps }}"
register: cephx_key_manila
delegate_to: "{{ groups['ceph-mon'][0] }}"
run_once: True

- name: Pushing cephx keyring
copy:
content: |
[client.manila]
key = {{ cephx_key_manila.keyring.key }}
dest: "{{ node_config_directory }}/manila-share/ceph.client.manila.keyring"
mode: "0600"
become: true
notify:
- Restart manila-share container
@ -12,17 +12,8 @@
- item.value.enabled | bool
with_dict: "{{ manila_services }}"

- include_tasks: ceph.yml
when:
- enable_ceph | bool
- enable_ceph_mds | bool
- (enable_manila_backend_cephfs_native | bool) or (enable_manila_backend_cephfs_nfs | bool)
- inventory_hostname in groups['manila-share']

- include_tasks: external_ceph.yml
when:
- not enable_ceph| bool
- not enable_ceph_mds| bool
- (enable_manila_backend_cephfs_native | bool) or (enable_manila_backend_cephfs_nfs | bool)
- inventory_hostname in groups['manila-share']
@ -19,6 +19,5 @@
- include_tasks: fix_cephfs_owner.yml
when:
- not enable_ceph | bool or enable_ceph_mds | bool
- enable_manila_backend_cephfs_native | bool
- inventory_hostname in groups['manila-share']
@ -66,33 +66,12 @@ nova_cell_services:
dimensions: "{{ nova_compute_ironic_dimensions }}"

####################
# Ceph
# Ceph options
####################
ceph_nova_pool_type: "{{ ceph_pool_type }}"
ceph_nova_cache_mode: "{{ ceph_cache_mode }}"

# Due to Ansible issues on include, you cannot override these variables. Please
# override the variables they reference instead.
nova_pool_name: "{{ ceph_nova_pool_name }}"
nova_pool_type: "{{ ceph_nova_pool_type }}"
nova_cache_mode: "{{ ceph_nova_cache_mode }}"
nova_pool_pg_num: "{{ ceph_pool_pg_num }}"
nova_pool_pgp_num: "{{ ceph_pool_pgp_num }}"

# Discard option for nova managed disks. Requires libvirt (1, 0, 6) or later and
# qemu (1, 6, 0) or later. Set to "" to disable.
nova_hw_disk_discard: "unmap"

ceph_client_nova_keyring_caps:
mon: 'profile rbd'
osd: >-
profile rbd pool={{ ceph_cinder_pool_name }},
profile rbd pool={{ ceph_cinder_pool_name }}-cache,
profile rbd pool={{ ceph_nova_pool_name }},
profile rbd pool={{ ceph_nova_pool_name }}-cache,
profile rbd pool={{ ceph_glance_pool_name }},
profile rbd pool={{ ceph_glance_pool_name }}-cache

####################
# Cells Options
####################
@ -1,119 +0,0 @@
---
- name: Ensuring config directory exists
file:
path: "{{ node_config_directory }}/{{ item }}"
state: "directory"
mode: "0770"
become: true
with_items:
- "nova-libvirt/secrets"
when: inventory_hostname in groups[nova_cell_compute_group]

- name: Copying over ceph.conf(s)
vars:
service_name: "{{ item }}"
merge_configs:
sources:
- "{{ role_path }}/../ceph/templates/ceph.conf.j2"
- "{{ node_custom_config }}/ceph.conf"
- "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf"
dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
mode: "0660"
become: true
with_items:
- "nova-compute"
- "nova-libvirt"
when: inventory_hostname in groups[nova_cell_compute_group]
notify:
- Restart {{ item }} container

- include_tasks: ../../ceph_pools.yml
vars:
pool_name: "{{ nova_pool_name }}"
pool_type: "{{ nova_pool_type }}"
cache_mode: "{{ nova_cache_mode }}"
pool_pg_num: "{{ nova_pool_pg_num }}"
pool_pgp_num: "{{ nova_pool_pgp_num }}"
pool_application: "rbd"

- name: Pulling cephx keyring for nova
become: true
kolla_ceph_keyring:
name: client.nova
caps: "{{ ceph_client_nova_keyring_caps }}"
register: nova_cephx_key
delegate_to: "{{ groups['ceph-mon'][0] }}"
run_once: True

- name: Pulling cinder cephx keyring for libvirt
become: true
command: docker exec ceph_mon ceph auth get-key client.cinder
register: cinder_cephx_raw_key
delegate_to: "{{ groups['ceph-mon'][0] }}"
when:
- enable_cinder | bool
- cinder_backend_ceph | bool
changed_when: False
run_once: True

- name: Pushing cephx keyring for nova
copy:
content: |
[client.nova]
key = {{ nova_cephx_key.keyring.key }}
dest: "{{ node_config_directory }}/nova-compute/ceph.client.nova.keyring"
mode: "0600"
become: true
when: inventory_hostname in groups[nova_cell_compute_group]
notify:
- Restart nova-compute container

- name: Pushing secrets xml for libvirt
template:
src: "secret.xml.j2"
dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ item.uuid }}.xml"
mode: "0600"
become: true
when:
- inventory_hostname in groups[nova_cell_compute_group]
- item.enabled | bool
with_items:
- uuid: "{{ rbd_secret_uuid }}"
name: client.nova secret
enabled: true
- uuid: "{{ cinder_rbd_secret_uuid }}"
name: client.cinder secret
enabled: "{{ enable_cinder | bool and cinder_backend_ceph | bool }}"
notify:
- Restart nova-libvirt container

- name: Pushing secrets key for libvirt
copy:
content: "{{ item.content }}"
dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ item.uuid }}.base64"
mode: "0600"
become: true
when:
- inventory_hostname in groups[nova_cell_compute_group]
- item.enabled | bool
with_items:
- uuid: "{{ rbd_secret_uuid }}"
content: "{{ nova_cephx_key.keyring.key }}"
enabled: true
- uuid: "{{ cinder_rbd_secret_uuid }}"
content: "{{ cinder_cephx_raw_key.stdout|default('') }}"
enabled: "{{ enable_cinder | bool and cinder_backend_ceph | bool }}"
notify:
- Restart nova-libvirt container

- name: Ensuring config directory has correct owner and permission
become: true
file:
path: "{{ node_config_directory }}/{{ item }}"
recurse: yes
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
with_items:
- "nova-compute"
- "nova-libvirt/secrets"
when: inventory_hostname in groups[nova_cell_compute_group]
@@ -36,15 +36,9 @@
     - kolla_copy_ca_into_containers | bool
   with_dict: "{{ nova_cell_services }}"
 
-- include_tasks: ceph.yml
-  when:
-    - enable_ceph | bool and nova_backend == "rbd"
-    - inventory_hostname in groups[nova_cell_conductor_group] or
-      inventory_hostname in groups[nova_cell_compute_group]
-
 - include_tasks: external_ceph.yml
   when:
-    - not enable_ceph | bool and (nova_backend == "rbd" or cinder_backend_ceph | bool)
+    - (nova_backend == "rbd" or cinder_backend_ceph | bool)
     - inventory_hostname in groups[nova_cell_compute_group]
 
 - name: Check if policies shall be overwritten
@@ -44,3 +44,10 @@
     - kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool
     - not (kolla_enable_tls_external | bool and kolla_enable_tls_internal | bool)
     - kolla_same_external_internal_vip | bool
+
+- name: Validate that enable_ceph is disabled
+  run_once: True
+  fail:
+    msg: "We are sorry but enable_ceph is no longer supported. Please use external ceph support."
+  when:
+    - (enable_ceph | default()) | bool
@@ -70,8 +70,8 @@ scrape_configs:
       honor_labels: true
       static_configs:
         - targets:
-{% for host in groups["ceph-mgr"] %}
-          - '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_ceph_mgr_exporter_port'] }}'
+{% for exporter in prometheus_ceph_mgr_exporter_endpoints %}
+          - '{{ exporter }}'
 {% endfor %}
 {% endif %}
 
@@ -20,7 +20,6 @@
     - enable_barbican_{{ enable_barbican | bool }}
     - enable_blazar_{{ enable_blazar | bool }}
     - enable_ceilometer_{{ enable_ceilometer | bool }}
-    - enable_ceph_{{ enable_ceph | bool }}
     - enable_chrony_{{ enable_chrony | bool }}
     - enable_cinder_{{ enable_cinder | bool }}
     - enable_cloudkitty_{{ enable_cloudkitty | bool }}
@@ -133,11 +132,6 @@
         tasks_from: loadbalancer
       tags: blazar
       when: enable_blazar | bool
-    - include_role:
-        role: ceph
-        tasks_from: loadbalancer
-      tags: ceph
-      when: enable_ceph | bool
     - include_role:
         role: cinder
         tasks_from: loadbalancer
@@ -590,22 +584,6 @@
         tags: keystone,
         when: enable_keystone | bool }
 
-- name: Apply role ceph
-  gather_facts: false
-  hosts:
-    - ceph-mds
-    - ceph-mgr
-    - ceph-mon
-    - ceph-nfs
-    - ceph-osd
-    - ceph-rgw
-    - '&enable_ceph_True'
-  serial: '{{ kolla_serial|default("0") }}'
-  roles:
-    - { role: ceph,
-        tags: ceph,
-        when: enable_ceph | bool }
-
 - name: Apply role kafka
   gather_facts: false
   hosts:
@@ -656,7 +634,6 @@
 - name: Apply role glance
   gather_facts: false
   hosts:
-    - ceph-mon
     - glance-api
     - '&enable_glance_True'
   serial: '{{ kolla_serial|default("0") }}'
@@ -682,7 +659,6 @@
 - name: Apply role cinder
   gather_facts: false
   hosts:
-    - ceph-mon
     - cinder-api
     - cinder-backup
     - cinder-scheduler
@@ -897,7 +873,6 @@
 - name: Apply role manila
   gather_facts: false
   hosts:
-    - ceph-mon
     - manila-api
     - manila-data
     - manila-share
@@ -912,7 +887,6 @@
 - name: Apply role gnocchi
   gather_facts: false
   hosts:
-    - ceph-mon
     - gnocchi-api
     - gnocchi-metricd
     - gnocchi-statsd
@@ -25,7 +25,7 @@ A basic Kolla inventory consists of several types of nodes, known in Ansible as
 * Compute - Compute nodes for compute services. This is where guest VMs
   live.
 
-* Storage - Storage nodes, for cinder-volume, LVM or ceph-osd.
+* Storage - Storage nodes for cinder-volume, LVM or Swift.
 
 * Monitoring - Monitor nodes which host monitoring services.
 
@@ -39,10 +39,10 @@ Usage of glance file backend under shared filesystem:
 Ceph backend
 ~~~~~~~~~~~~
 
-To make use of ``ceph`` backend in glance, simply enable ceph or external ceph.
+To make use of ``ceph`` backend in glance, simply enable external ceph.
 By default will enable backend ceph automatically.
-Please refer to :doc:`../storage/ceph-guide`
-or :doc:`../storage/external-ceph-guide` on how to configure this backend.
+Please refer to :doc:`../storage/external-ceph-guide`
+on how to configure this backend.
 
 To enable the ceph backend manually:
 
@@ -1,532 +0,0 @@
-.. _ceph-guide:
-
-============================================
-Ceph - Software Defined Storage (Deprecated)
-============================================
-
-.. warning::
-   Support for deploying Ceph via Kolla Ansible is deprecated. In a future
-   release support for deploying Ceph will be removed from Kolla Ansible. Prior
-   to this we will ensure a migration path to another tool such as `Ceph
-   Ansible <http://docs.ceph.com/ceph-ansible/master/>`__ is available. For new
-   deployments it is recommended to use another tool to deploy Ceph to avoid a
-   future migration. This can be integrated with OpenStack by following
-   :doc:`external-ceph-guide`.
-
-.. note::
-   The out-of-the-box Ceph deployment requires 3 hosts with at least one block
-   device on each host that can be dedicated for sole use by Ceph.
-
-   However, with tweaks to the Ceph cluster you can deploy a **healthy** cluster
-   with a single host and a single block device.
-
-Requirements
-------------
-
-* A minimum of 3 hosts for a vanilla deploy
-* A minimum of 1 block device per host
-
-Preparation
------------
-
-To prepare a disk for use as a
-`Ceph OSD <http://docs.ceph.com/docs/master/man/8/ceph-osd/>`_ you must add a
-special partition label to the disk. This partition label is how Kolla detects
-the disks to format and bootstrap. Any disk with a matching partition label
-will be reformatted so use caution.
-
-Filestore
-~~~~~~~~~
-
-.. note::
-
-   From Rocky release - kolla-ansible by default creates Bluestore OSDs.
-   Please see Configuration section to change that behaviour.
-
-To prepare a filestore OSD as a storage drive, execute the following
-operations:
-
-.. warning::
-
-   ALL DATA ON $DISK will be LOST! Where $DISK is /dev/sdb or something similar.
-
-.. code-block:: console
-
-   parted $DISK -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP 1 -1
-
-The following shows an example of using parted to configure ``/dev/sdb`` for
-usage with Kolla.
-
-.. code-block:: console
-
-   parted /dev/sdb -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP 1 -1
-   parted /dev/sdb print
-   Model: VMware, VMware Virtual S (scsi)
-   Disk /dev/sdb: 10.7GB
-   Sector size (logical/physical): 512B/512B
-   Partition Table: gpt
-   Number  Start   End     Size    File system  Name                       Flags
-    1      1049kB  10.7GB  10.7GB               KOLLA_CEPH_OSD_BOOTSTRAP
-
-Bluestore
-~~~~~~~~~
-
-To prepare a bluestore OSD partition, execute the following operations:
-
-.. code-block:: console
-
-   parted $DISK -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_BS 1 -1
-
-If only one device is offered, Kolla Ceph will create the bluestore OSD on the
-device. Kolla Ceph will create two partitions for OSD and block separately.
-
-If more than one devices are offered for one bluestore OSD, Kolla Ceph will
-create partitions for block, block.wal and block.db according to the partition
-labels.
-
-To prepare a bluestore OSD block partition, execute the following operations:
-
-.. code-block:: console
-
-   parted $DISK -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_BS_FOO_B 1 -1
-
-To prepare a bluestore OSD block.wal partition, execute the following
-operations:
-
-.. code-block:: console
-
-   parted $DISK -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_BS_FOO_W 1 -1
-
-To prepare a bluestore OSD block.db partition, execute the following
-operations:
-
-.. code-block:: console
-
-   parted $DISK -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_BS_FOO_D 1 -1
-
-Kolla Ceph will handle the bluestore OSD according to the above up to four
-partition labels. In Ceph bluestore OSD, the block.wal and block.db partitions
-are not mandatory.
-
-.. note::
-
-   In the case there are more than one devices in one bluestore OSD and there
-   are more than one bluestore OSD in one node, it is required to use suffixes
-   (``_42``, ``_FOO``, ``_FOO42``, ..). Kolla Ceph will gather all the
-   partition labels and deploy bluestore OSD on top of the devices which have
-   the same suffix in the partition label.
-
-
-Using an external journal drive
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. note::
-
-   The section is only meaningful for Ceph filestore OSD.
-
-The steps documented above created a journal partition of 5 GByte
-and a data partition with the remaining storage capacity on the same tagged
-drive.
-
-It is a common practice to place the journal of an OSD on a separate
-journal drive. This section documents how to use an external journal drive.
-
-Prepare the storage drive in the same way as documented above:
-
-.. warning::
-
-   ALL DATA ON $DISK will be LOST! Where $DISK is /dev/sdb or something similar.
-
-.. code-block:: console
-
-   parted $DISK -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_FOO 1 -1
-
-To prepare the journal external drive execute the following command:
-
-.. code-block:: console
-
-   parted $DISK -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_FOO_J 1 -1
-
-.. note::
-
-   Use different suffixes (``_42``, ``_FOO``, ``_FOO42``, ..) to use different external
-   journal drives for different storage drives. One external journal drive can only
-   be used for one storage drive.
-
-.. note::
-
-   The partition labels ``KOLLA_CEPH_OSD_BOOTSTRAP`` and ``KOLLA_CEPH_OSD_BOOTSTRAP_J``
-   are not working when using external journal drives. It is required to use
-   suffixes (``_42``, ``_FOO``, ``_FOO42``, ..). If you want to setup only one
-   storage drive with one external journal drive it is also necessary to use a suffix.
-
-
-Configuration
--------------
-
-Edit the ``[storage]`` group in the inventory which contains the hostname
-of the hosts that have the block devices you have prepped as shown above.
-
-.. code-block:: ini
-
-   [storage]
-   controller
-   compute1
-
-Enable Ceph in ``/etc/kolla/globals.yml``:
-
-.. code-block:: yaml
-
-   enable_ceph: "yes"
-
-Ceph RADOS Gateway
-~~~~~~~~~~~~~~~~~~
-
-RadosGW is optional, enable it in ``/etc/kolla/globals.yml``:
-
-.. code-block:: yaml
-
-   enable_ceph_rgw: "yes"
-
-You can enable RadosGW to be registered as Swift in Keystone catalog:
-
-.. code-block:: yaml
-
-   enable_ceph_rgw_keystone: "yes"
-
-.. note::
-
-   By default RadosGW supports both Swift and S3 API, and it is not
-   completely compatible with Swift API. The option `ceph_rgw_compatibility`
-   in ``/etc/kolla/globals.yml`` can enable/disable the RadosGW
-   compatibility with Swift API completely. After changing the value, run the
-   "reconfigure" command to enable.
-
-RGW requires a healthy cluster in order to be successfully deployed. On initial
-start up, RGW will create several pools. The first pool should be in an
-operational state to proceed with the second one, and so on. So, in the case of
-an **all-in-one** deployment, it is necessary to change the default number of
-copies for the pools before deployment. Modify the file
-``/etc/kolla/config/ceph.conf`` and add the contents:
-
-.. path /etc/kolla/config/ceph.conf
-.. code-block:: ini
-
-   [global]
-   osd pool default size = 1
-   osd pool default min size = 1
-
-NFS
-~~~
-
-NFS is an optional feature, you can enable it in ``/etc/kolla/globals.yml``:
-
-.. code-block:: yaml
-
-   enable_ceph_nfs: "yes"
-
-.. note::
-
-   If you are using Ubuntu, please enable Ceph NFS before using
-   ``kolla-ansible bootstrap-servers`` command - it will install required rpcbind
-   package.
-
-Store type
-~~~~~~~~~~
-
-Configure the Ceph store type in ``/etc/kolla/globals.yml``, the default
-value is ``bluestore`` in Rocky:
-
-.. code-block:: yaml
-
-   ceph_osd_store_type: "bluestore"
-
-Recommendations
----------------
-
-Placement groups
-~~~~~~~~~~~~~~~~
-
-Regarding number of placement groups (PGs)
-
-Kolla sets very conservative values for the number of PGs per pool
-(`ceph_pool_pg_num` and `ceph_pool_pgp_num`). This is in order to ensure
-the majority of users will be able to deploy Ceph out of the box. It is
-*highly* recommended to consult the official Ceph documentation regarding
-these values before running Ceph in any kind of production scenario.
-
-Cluster Network
-~~~~~~~~~~~~~~~
-
-To build a high performance and secure Ceph Storage Cluster, the Ceph community
-recommend the use of two separate networks: public network and cluster network.
-Edit the ``/etc/kolla/globals.yml`` and configure the ``cluster_interface``:
-
-.. path /etc/kolla/globals.yml
-.. code-block:: yaml
-
-   cluster_interface: "eth2"
-
-For more details, see `NETWORK CONFIGURATION REFERENCE
-<http://docs.ceph.com/docs/master/rados/configuration/network-config-ref/#ceph-networks>`_
-of Ceph Documentation.
-
-Deployment
-----------
-
-Finally deploy the Ceph-enabled OpenStack:
-
-.. code-block:: console
-
-   kolla-ansible deploy -i path/to/inventory
-
-.. note::
-
-   Kolla Ceph supports mixed Ceph OSD deployment, i.e. some Ceph OSDs are
-   bluestore, the others are filestore. The ``ceph_osd_store_type`` of each
-   Ceph OSD can be configured under ``[storage]`` in the multinode inventory
-   file. The Ceph OSD store type is unique in one storage node. For example:
-
-   .. code-block:: ini
-
-      [storage]
-      storage_node1_hostname ceph_osd_store_type=bluestore
-      storage_node2_hostname ceph_osd_store_type=bluestore
-      storage_node3_hostname ceph_osd_store_type=filestore
-      storage_node4_hostname ceph_osd_store_type=filestore
-
-Using Cache Tiering
--------------------
-
-An optional `cache tiering <http://docs.ceph.com/docs/jewel/rados/operations/cache-tiering/>`_
-can be deployed by formatting at least one cache device and enabling cache
-tiering in the globals.yml configuration file.
-
-To prepare a filestore OSD as a cache device, execute the following
-operations:
-
-.. code-block:: console
-
-   parted $DISK -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_CACHE_BOOTSTRAP 1 -1
-
-.. note::
-
-   To prepare a bluestore OSD as a cache device, change the partition name in
-   the above command to "KOLLA_CEPH_OSD_CACHE_BOOTSTRAP_BS". The deployment of
-   bluestore cache OSD is the same as bluestore OSD.
-
-Enable the Ceph cache tier in ``/etc/kolla/globals.yml``:
-
-.. code-block:: yaml
-
-   enable_ceph: "yes"
-   ceph_enable_cache: "yes"
-   # Valid options are [ forward, none, writeback ]
-   ceph_cache_mode: "writeback"
-
-After this run the playbooks as you normally would, for example:
-
-.. code-block:: console
-
-   kolla-ansible deploy -i path/to/inventory
-
-Setting up an Erasure Coded Pool
---------------------------------
-
-`Erasure code <http://docs.ceph.com/docs/jewel/rados/operations/erasure-code/>`_
-is the new big thing from Ceph. Kolla has the ability to setup your Ceph pools
-as erasure coded pools. Due to technical limitations with Ceph, using erasure
-coded pools as OpenStack uses them requires a cache tier. Additionally, you
-must make the choice to use an erasure coded pool or a replicated pool
-(the default) when you initially deploy. You cannot change this without
-completely removing the pool and recreating it.
-
-To enable erasure coded pools add the following options to your
-``/etc/kolla/globals.yml`` configuration file:
-
-.. code-block:: yaml
-
-   # A requirement for using the erasure-coded pools is you must setup a cache tier
-   # Valid options are [ erasure, replicated ]
-   ceph_pool_type: "erasure"
-   # Optionally, you can change the profile
-   #ceph_erasure_profile: "k=4 m=2 ruleset-failure-domain=host"
-
-Managing Ceph
--------------
-
-Check the Ceph status for more diagnostic information. The sample output below
-indicates a healthy cluster:
-
-.. code-block:: console
-
-   docker exec ceph_mon ceph -s
-
-     cluster:
-       id:     f2ed6c00-c043-4e1c-81b6-07c512db26b1
-       health: HEALTH_OK
-
-     services:
-       mon: 1 daemons, quorum 172.16.31.121
-       mgr: poc12-01(active)
-       osd: 4 osds: 4 up, 4 in; 5 remapped pgs
-
-     data:
-       pools:   4 pools, 512 pgs
-       objects: 0 objects, 0 bytes
-       usage:   432 MB used, 60963 MB / 61395 MB avail
-       pgs:     512 active+clean
-
-If Ceph is run in an **all-in-one** deployment or with less than three storage
-nodes, further configuration is required. It is necessary to change the default
-number of copies for the pool. The following example demonstrates how to change
-the number of copies for the pool to 1:
-
-.. code-block:: console
-
-   docker exec ceph_mon ceph osd pool set rbd size 1
-
-All the pools must be modified if Glance, Nova, and Cinder have been deployed.
-An example of modifying the pools to have 2 copies:
-
-.. code-block:: console
-
-   for p in images vms volumes backups; do docker exec ceph_mon ceph osd pool set ${p} size 2; done
-
-If using a cache tier, these changes must be made as well:
-
-.. code-block:: console
-
-   for p in images vms volumes backups; do docker exec ceph_mon ceph osd pool set ${p}-cache size 2; done
-
-The default pool Ceph creates is named **rbd**. It is safe to remove this pool:
-
-.. code-block:: console
-
-   docker exec ceph_mon ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
-
-Troubleshooting
----------------
-
-Deploy fails with 'Fetching Ceph keyrings ... No JSON object could be decoded'
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If an initial deploy of Ceph fails, perhaps due to improper configuration or
-similar, the cluster will be partially formed and will need to be reset for a
-successful deploy.
-
-In order to do this the operator should remove the `ceph_mon_config` volume
-from each Ceph monitor node:
-
-.. code-block:: console
-
-   ansible -i ansible/inventory/multinode \
-           -a 'docker volume rm ceph_mon_config' \
-           ceph-mon
-
-Simple 3 Node Example
----------------------
-
-This example will show how to deploy Ceph in a very simple setup using 3
-storage nodes. 2 of those nodes (kolla1 and kolla2) will also provide other
-services like control, network, compute, and monitoring. The 3rd
-(kolla3) node will only act as a storage node.
-
-This example will only focus on the Ceph aspect of the deployment and assumes
-that you can already deploy a fully functional environment using 2 nodes that
-does not employ Ceph yet. So we will be adding to the existing multinode
-inventory file you already have.
-
-Each of the 3 nodes are assumed to have two disk, ``/dev/sda`` (40GB)
-and ``/dev/sdb`` (10GB). Size is not all that important... but for now make
-sure each sdb disk are of the same size and are at least 10GB. This example
-will use a single disk (/dev/sdb) for both Ceph data and journal. It will not
-implement caching.
-
-Here is the top part of the multinode inventory file used in the example
-environment before adding the 3rd node for Ceph:
-
-.. code-block:: ini
-
-   [control]
-   # These hostname must be resolvable from your deployment host
-   kolla1.ducourrier.com
-   kolla2.ducourrier.com
-
-   [network]
-   kolla1.ducourrier.com
-   kolla2.ducourrier.com
-
-   [compute]
-   kolla1.ducourrier.com
-   kolla2.ducourrier.com
-
-   [monitoring]
-   kolla1.ducourrier.com
-   kolla2.ducourrier.com
-
-   [storage]
-   kolla1.ducourrier.com
-   kolla2.ducourrier.com
-
-Configuration
-~~~~~~~~~~~~~
-
-To prepare the 2nd disk (/dev/sdb) of each nodes for use by Ceph you will need
-to add a partition label to it as shown below:
-
-.. code-block:: console
-
-   parted /dev/sdb -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP 1 -1
-
-Make sure to run this command on each of the 3 nodes or the deployment will
-fail.
-
-Next, edit the multinode inventory file and make sure the 3 nodes are listed
-under ``[storage]``. In this example I will add kolla3.ducourrier.com to the
-existing inventory file:
-
-.. code-block:: ini
-
-   [control]
-   # These hostname must be resolvable from your deployment host
-   kolla1.ducourrier.com
-   kolla2.ducourrier.com
-
-   [network]
-   kolla1.ducourrier.com
-   kolla2.ducourrier.com
-
-   [compute]
-   kolla1.ducourrier.com
-   kolla2.ducourrier.com
-
-   [monitoring]
-   kolla1.ducourrier.com
-   kolla2.ducourrier.com
-
-   [storage]
-   kolla1.ducourrier.com
-   kolla2.ducourrier.com
-   kolla3.ducourrier.com
-
-It is now time to enable Ceph in the environment by editing the
-``/etc/kolla/globals.yml`` file:
-
-.. code-block:: yaml
-
-   enable_ceph: "yes"
-   enable_ceph_rgw: "yes"
-   enable_cinder: "yes"
-   glance_backend_file: "no"
-   glance_backend_ceph: "yes"
-
-Deployment
-~~~~~~~~~~
-
-Finally deploy the Ceph-enabled configuration:
-
-.. code-block:: console
-
-   kolla-ansible deploy -i path/to/inventory-file
@@ -22,17 +22,8 @@ creating the pool and keyrings with appropriate permissions for each service.
 Enabling External Ceph
 ~~~~~~~~~~~~~~~~~~~~~~
 
-Using external Ceph with Kolla means not to deploy Ceph via Kolla. Therefore,
-disable Ceph deployment in ``/etc/kolla/globals.yml``
-
-.. code-block:: yaml
-
-   enable_ceph: "no"
-
-There are flags indicating individual services to use ceph or not which default
-to the value of ``enable_ceph``. Those flags now need to be activated in order
-to activate external Ceph integration. This can be done individually per
-service in ``/etc/kolla/globals.yml``:
+To activate external Ceph integration you need to enable Ceph backend.
+This can be done individually per service in ``/etc/kolla/globals.yml``:
 
 .. code-block:: yaml
 
|
|||||||
gnocchi_backend_storage: "ceph"
|
gnocchi_backend_storage: "ceph"
|
||||||
enable_manila_backend_cephfs_native: "yes"
|
enable_manila_backend_cephfs_native: "yes"
|
||||||
|
|
||||||
The combination of ``enable_ceph: "no"`` and ``<service>_backend_ceph: "yes"``
|
|
||||||
triggers the activation of external ceph mechanism in Kolla.
|
|
||||||
|
|
||||||
Edit the Inventory File
|
Edit the Inventory File
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
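As a hedged illustration of the per-service flags described in the external Ceph guide hunks above, a minimal sketch of ``/etc/kolla/globals.yml`` might look like the following. Only flags that appear elsewhere in this change are used; which services an operator actually enables is an assumption.

.. code-block:: yaml

   # Assumed example: integrate an externally deployed Ceph cluster per service.
   glance_backend_ceph: "yes"
   cinder_backend_ceph: "yes"
   nova_backend_ceph: "yes"
   gnocchi_backend_storage: "ceph"
   enable_manila_backend_cephfs_native: "yes"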
@@ -8,7 +8,6 @@ supported by kolla.
 .. toctree::
    :maxdepth: 1
 
-   ceph-guide
    external-ceph-guide
    cinder-guide
    cinder-guide-hnas
|
|||||||
Preparation and Deployment
|
Preparation and Deployment
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Cinder and Ceph are required, enable it in ``/etc/kolla/globals.yml``:
|
Cinder is required, enable it in ``/etc/kolla/globals.yml``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
enable_cinder: "yes"
|
enable_cinder: "yes"
|
||||||
enable_ceph: "yes"
|
|
||||||
|
|
||||||
Enable Manila and generic back end in ``/etc/kolla/globals.yml``:
|
Enable Manila and generic back end in ``/etc/kolla/globals.yml``:
|
||||||
|
|
||||||
|
@ -388,7 +388,7 @@ There are a few options that are required to deploy Kolla-Ansible:
|
|||||||
|
|
||||||
.. note::
|
.. note::
|
||||||
|
|
||||||
This only affects OpenStack services. Infrastructure services like Ceph are
|
This only affects OpenStack services. Infrastructure services are
|
||||||
always "binary".
|
always "binary".
|
||||||
|
|
||||||
.. note::
|
.. note::
|
||||||
|
@@ -232,11 +232,6 @@
 #enable_ceilometer_ipmi: "no"
 #enable_cells: "no"
 #enable_central_logging: "no"
-#enable_ceph: "no"
-#enable_ceph_mds: "no"
-#enable_ceph_rgw: "no"
-#enable_ceph_nfs: "no"
-#enable_ceph_dashboard: "{{ enable_ceph | bool }}"
 #enable_chrony: "yes"
 #enable_cinder: "no"
 #enable_cinder_backup: "yes"
|
|||||||
# to configure IPv6 in RabbitMQ server.
|
# to configure IPv6 in RabbitMQ server.
|
||||||
#rabbitmq_server_additional_erl_args: ""
|
#rabbitmq_server_additional_erl_args: ""
|
||||||
|
|
||||||
##############
|
#######################
|
||||||
# Ceph options
|
# External Ceph options
|
||||||
##############
|
#######################
|
||||||
# Ceph can be setup with a caching to improve performance. To use the cache you
|
# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes)
|
||||||
# must provide separate disks than those for the OSDs
|
|
||||||
#ceph_enable_cache: "no"
|
|
||||||
|
|
||||||
# Set to no if using external Ceph without cephx.
|
|
||||||
#external_ceph_cephx_enabled: "yes"
|
#external_ceph_cephx_enabled: "yes"
|
||||||
|
|
||||||
# Ceph is not able to determine the size of a cache pool automatically,
|
# Glance
|
||||||
# so the configuration on the absolute size is required here, otherwise the flush/evict will not work.
|
#ceph_glance_keyring: "ceph.client.glance.keyring"
|
||||||
#ceph_target_max_bytes: ""
|
#ceph_glance_user: "glance"
|
||||||
#ceph_target_max_objects: ""
|
#ceph_glance_pool_name: "images"
|
||||||
|
# Cinder
|
||||||
# Valid options are [ forward, none, writeback ]
|
#ceph_cinder_keyring: "ceph.client.cinder.keyring"
|
||||||
#ceph_cache_mode: "writeback"
|
#ceph_cinder_user: "cinder"
|
||||||
|
#ceph_cinder_pool_name: "volumes"
|
||||||
# A requirement for using the erasure-coded pools is you must setup a cache tier
|
#ceph_cinder_backup_keyring: "ceph.client.cinder-backup.keyring"
|
||||||
# Valid options are [ erasure, replicated ]
|
#ceph_cinder_backup_user: "cinder-backup"
|
||||||
#ceph_pool_type: "replicated"
|
#ceph_cinder_backup_pool_name: "backups"
|
||||||
|
# Nova
|
||||||
# Integrate ceph rados object gateway with openstack keystone
|
#ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
|
||||||
#enable_ceph_rgw_keystone: "no"
|
#ceph_nova_user: "nova"
|
||||||
|
#ceph_nova_pool_name: "vms"
|
||||||
# Set the pgs and pgps for pool
|
# Gnocchi
|
||||||
# WARNING! These values are dependant on the size and shape of your cluster -
|
#ceph_gnocchi_keyring: "ceph.client.gnocchi.keyring"
|
||||||
# the default values are not suitable for production use. Please refer to the
|
#ceph_gnocchi_user: "gnocchi"
|
||||||
# Kolla Ceph documentation for more information.
|
#ceph_gnocchi_pool_name: "gnocchi"
|
||||||
#ceph_pool_pg_num: 8
|
# Manila
|
||||||
#ceph_pool_pgp_num: 8
|
#ceph_manila_keyring: "ceph.client.manila.keyring"
|
||||||
|
#ceph_manila_user: "manila"
|
||||||
|
|
||||||
#############################
|
#############################
|
||||||
# Keystone - Identity Options
|
# Keystone - Identity Options
|
||||||
@ -455,7 +447,7 @@
|
|||||||
# Gnocchi options
|
# Gnocchi options
|
||||||
#################
|
#################
|
||||||
# Valid options are [ file, ceph ]
|
# Valid options are [ file, ceph ]
|
||||||
#gnocchi_backend_storage: "{{ 'ceph' if enable_ceph|bool else 'file' }}"
|
#gnocchi_backend_storage: "file"
|
||||||
|
|
||||||
# Valid options are [redis, '']
|
# Valid options are [redis, '']
|
||||||
#gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}"
|
#gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}"
|
||||||
@ -464,7 +456,7 @@
|
|||||||
# Cinder - Block Storage Options
|
# Cinder - Block Storage Options
|
||||||
################################
|
################################
|
||||||
# Enable / disable Cinder backends
|
# Enable / disable Cinder backends
|
||||||
#cinder_backend_ceph: "{{ enable_ceph }}"
|
#cinder_backend_ceph: "no"
|
||||||
#cinder_backend_vmwarevc_vmdk: "no"
|
#cinder_backend_vmwarevc_vmdk: "no"
|
||||||
#cinder_volume_group: "cinder-volumes"
|
#cinder_volume_group: "cinder-volumes"
|
||||||
# Valid options are [ '', redis, etcd ]
|
# Valid options are [ '', redis, etcd ]
|
||||||
@ -499,7 +491,7 @@
|
|||||||
########################
|
########################
|
||||||
# Nova - Compute Options
|
# Nova - Compute Options
|
||||||
########################
|
########################
|
||||||
#nova_backend_ceph: "{{ enable_ceph }}"
|
#nova_backend_ceph: "no"
|
||||||
|
|
||||||
# Valid options are [ qemu, kvm, vmware, xenapi ]
|
# Valid options are [ qemu, kvm, vmware, xenapi ]
|
||||||
#nova_compute_virt_type: "kvm"
|
#nova_compute_virt_type: "kvm"
|
||||||
@ -632,7 +624,7 @@
|
|||||||
#enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}"
|
#enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}"
|
||||||
#enable_prometheus_memcached: "{{ enable_prometheus | bool }}"
|
#enable_prometheus_memcached: "{{ enable_prometheus | bool }}"
|
||||||
#enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}"
|
#enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}"
|
||||||
#enable_prometheus_ceph_mgr_exporter: "{{ enable_prometheus | bool and enable_ceph | bool }}"
|
#enable_prometheus_ceph_mgr_exporter: "no"
|
||||||
#enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
|
#enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
|
||||||
#enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}"
|
#enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}"
|
||||||
#enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
|
#enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
|
||||||
@ -640,6 +632,12 @@
|
|||||||
# List of extra parameters passed to prometheus. You can add as many to the list.
|
# List of extra parameters passed to prometheus. You can add as many to the list.
|
||||||
#prometheus_cmdline_extras:
|
#prometheus_cmdline_extras:
|
||||||
|
|
||||||
|
# Example of setting endpoints for prometheus ceph mgr exporter.
|
||||||
|
# You should add all ceph mgr's in your external ceph deployment.
|
||||||
|
#prometheus_ceph_mgr_exporter_endpoints:
|
||||||
|
# - host1:port1
|
||||||
|
# - host2:port2
|
||||||
|
|
||||||
#########
|
#########
|
||||||
# Freezer
|
# Freezer
|
||||||
#########
|
#########
|
||||||
|
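A minimal hedged sketch of how the ``prometheus_ceph_mgr_exporter_endpoints`` list introduced above might be populated; the host names and port number are placeholder assumptions for illustration only.

.. code-block:: yaml

   # Assumed example: one entry per ceph-mgr in the external Ceph deployment.
   prometheus_ceph_mgr_exporter_endpoints:
     - ceph-mgr0.example.com:9283
     - ceph-mgr1.example.com:9283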
@@ -1,11 +1,9 @@
 ---
 ###################
-# Ceph options
+# External Ceph options
 ####################
 # These options must be UUID4 values in string format
 # XXXXXXXX-XXXX-4XXX-XXXX-XXXXXXXXXXXX
-ceph_cluster_fsid:
-ceph_rgw_keystone_password:
 # for backward compatible consideration, rbd_secret_uuid is only used for nova,
 # cinder_rbd_secret_uuid is used for cinder
 rbd_secret_uuid:
|
|||||||
passwords_file = os.path.expanduser(args.passwords)
|
passwords_file = os.path.expanduser(args.passwords)
|
||||||
|
|
||||||
# These keys should be random uuids
|
# These keys should be random uuids
|
||||||
uuid_keys = ['ceph_cluster_fsid',
|
uuid_keys = ['rbd_secret_uuid',
|
||||||
'rbd_secret_uuid',
|
|
||||||
'cinder_rbd_secret_uuid',
|
'cinder_rbd_secret_uuid',
|
||||||
'gnocchi_project_id',
|
'gnocchi_project_id',
|
||||||
'gnocchi_resource_id',
|
'gnocchi_resource_id',
|
||||||
|
@@ -0,0 +1,7 @@
+---
+upgrade:
+  - |
+    Support for deploying Ceph has been removed, after it was deprecated in
+    Stein. Please use an external tool to deploy Ceph and integrate it with
+    Kolla-Ansible deployed OpenStack by following the `external Ceph guide
+    <https://docs.openstack.org/kolla-ansible/latest/reference/storage/external-ceph-guide.html>`__.
@@ -77,20 +77,14 @@ copy_logs() {
     # docker related information
     (docker info && docker images && docker ps -a && docker network ls && docker inspect $(docker ps -aq)) > ${LOG_DIR}/system_logs/docker-info.txt
 
-    # ceph related logs
-    # NOTE(mnasiadka): regex to match both ceph_mon and ceph-mon-$hostname
-    for container in $(docker ps --filter name=ceph.\?mon --format "{{.Names}}"); do
-        if [ $container == "ceph_mon" ]; then
-            CEPH_LOG_DIR="${LOG_DIR}/kolla/ceph"
-        else
-            CEPH_LOG_DIR="${LOG_DIR}/ceph"
-            mkdir -p ${CEPH_LOG_DIR}
-        fi
-        docker exec ${container} ceph --connect-timeout 5 -s > ${CEPH_LOG_DIR}/ceph_s.txt
+    # ceph-ansible related logs
+    mkdir -p ${LOG_DIR}/ceph
+    for container in $(docker ps --filter name=ceph-mon --format "{{.Names}}"); do
+        docker exec ${container} ceph --connect-timeout 5 -s > ${LOG_DIR}/ceph/ceph_s.txt
         # NOTE(yoctozepto): osd df removed on purpose to avoid CI POST_FAILURE due to a possible hang:
         # as of ceph mimic it hangs when MON is operational but MGR not
         # its usefulness is mediocre and having POST_FAILUREs is bad
-        docker exec ${container} ceph --connect-timeout 5 osd tree > ${CEPH_LOG_DIR}/ceph_osd_tree.txt
+        docker exec ${container} ceph --connect-timeout 5 osd tree > ${LOG_DIR}/ceph/ceph_osd_tree.txt
     done
 
     # bifrost related logs
@@ -21,7 +21,7 @@
   need_build_image: false
   build_image_tag: "change_{{ zuul.change | default('none') }}"
   openstack_core_enabled: "{{ openstack_core_enabled }}"
-  openstack_core_tested: "{{ scenario in ['core', 'ceph', 'ceph-ansible', 'zun', 'cells', 'swift'] }}"
+  openstack_core_tested: "{{ scenario in ['core', 'ceph-ansible', 'zun', 'cells', 'swift'] }}"
   dashboard_enabled: "{{ openstack_core_enabled }}"
   # TODO(mgoddard): Remove when previous_release is ussuri.
   playbook_python_version: "{{ '2' if is_upgrade and previous_release == 'train' else '3' }}"
|
|||||||
|
|
||||||
- name: Prepare disks for a storage service
|
- name: Prepare disks for a storage service
|
||||||
script: "setup_disks.sh {{ disk_type }}"
|
script: "setup_disks.sh {{ disk_type }}"
|
||||||
when: scenario in ['ceph', 'ceph-ansible', 'zun', 'swift']
|
when: scenario in ['ceph-ansible', 'zun', 'swift']
|
||||||
become: true
|
become: true
|
||||||
vars:
|
vars:
|
||||||
disk_type: "{{ ceph_storetype if scenario in ['ceph', 'ceph-ansible'] else scenario }}"
|
disk_type: "{{ ceph_storetype if scenario in ['ceph-ansible'] else scenario }}"
|
||||||
ceph_storetype: "{{ hostvars[inventory_hostname].get('ceph_osd_storetype') }}"
|
ceph_storetype: "{{ hostvars[inventory_hostname].get('ceph_osd_storetype') }}"
|
||||||
|
|
||||||
- hosts: primary
|
- hosts: primary
|
||||||
@ -124,10 +124,6 @@
|
|||||||
- src: "tests/templates/nova-compute-overrides.j2"
|
- src: "tests/templates/nova-compute-overrides.j2"
|
||||||
dest: /etc/kolla/config/nova/nova-compute.conf
|
dest: /etc/kolla/config/nova/nova-compute.conf
|
||||||
when: "{{ openstack_core_enabled }}"
|
when: "{{ openstack_core_enabled }}"
|
||||||
# ceph.conf
|
|
||||||
- src: "tests/templates/ceph-overrides.j2"
|
|
||||||
dest: /etc/kolla/config/ceph.conf
|
|
||||||
when: "{{ scenario == 'ceph' }}"
|
|
||||||
# bifrost/dib.yml
|
# bifrost/dib.yml
|
||||||
- src: "tests/templates/bifrost-dib-overrides.j2"
|
- src: "tests/templates/bifrost-dib-overrides.j2"
|
||||||
dest: /etc/kolla/config/bifrost/dib.yml
|
dest: /etc/kolla/config/bifrost/dib.yml
|
||||||
@ -471,10 +467,6 @@
|
|||||||
# nova-compute.conf
|
# nova-compute.conf
|
||||||
- src: "tests/templates/nova-compute-overrides.j2"
|
- src: "tests/templates/nova-compute-overrides.j2"
|
||||||
dest: /etc/kolla/config/nova/nova-compute.conf
|
dest: /etc/kolla/config/nova/nova-compute.conf
|
||||||
# ceph.conf
|
|
||||||
- src: "tests/templates/ceph-overrides.j2"
|
|
||||||
dest: /etc/kolla/config/ceph.conf
|
|
||||||
when: "{{ scenario == 'ceph' }}"
|
|
||||||
when: item.when | default(true)
|
when: item.when | default(true)
|
||||||
|
|
||||||
# TODO(mgoddard): Remove this block when previous_release is ussuri.
|
# TODO(mgoddard): Remove this block when previous_release is ussuri.
|
||||||
|
@@ -22,38 +22,6 @@ elif [ $1 = 'swift' ]; then
     parted $free_device -s -- mklabel gpt mkpart KOLLA_SWIFT_DATA 1 -1
     free_partition=${free_device}p1
     mkfs.xfs -L d0 $free_partition
-elif [ $1 = 'filestore' ]; then
-    #setup devices for Kolla Ceph filestore OSD
-    dd if=/dev/zero of=/opt/data/kolla/ceph-osd1.img bs=5M count=1000
-    LOOP=$(losetup -f)
-    losetup $LOOP /opt/data/kolla/ceph-osd1.img
-    parted $LOOP -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_OSD1 1 -1
-
-    dd if=/dev/zero of=/opt/data/kolla/ceph-journal1.img bs=5M count=512
-    LOOP=$(losetup -f)
-    losetup $LOOP /opt/data/kolla/ceph-journal1.img
-    parted $LOOP -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_OSD1_J 1 -1
-elif [ $1 = 'bluestore' ]; then
-    # Setup devices for Kolla Ceph bluestore OSD
-    dd if=/dev/zero of=/opt/data/kolla/ceph-osd0.img bs=5M count=100
-    LOOP=$(losetup -f)
-    losetup $LOOP /opt/data/kolla/ceph-osd0.img
-    parted $LOOP -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_BS_OSD0 1 -1
-
-    dd if=/dev/zero of=/opt/data/kolla/ceph-osd0-b.img bs=5M count=1000
-    LOOP=$(losetup -f)
-    losetup $LOOP /opt/data/kolla/ceph-osd0-b.img
-    parted $LOOP -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_BS_OSD0_B 1 -1
-
-    dd if=/dev/zero of=/opt/data/kolla/ceph-osd0-w.img bs=5M count=200
-    LOOP=$(losetup -f)
-    losetup $LOOP /opt/data/kolla/ceph-osd0-w.img
-    parted $LOOP -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_BS_OSD0_W 1 -1
-
-    dd if=/dev/zero of=/opt/data/kolla/ceph-osd0-d.img bs=5M count=200
-    LOOP=$(losetup -f)
-    losetup $LOOP /opt/data/kolla/ceph-osd0-d.img
-    parted $LOOP -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_BS_OSD0_D 1 -1
 elif [ $1 = 'ceph-lvm' ]; then
     free_device=$(losetup -f)
     fallocate -l 10G /var/lib/ceph-osd1.img
@@ -1,6 +1,6 @@
 [storage]
 {% for host in hostvars %}
-{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user=kolla ansible_ssh_private_key_file={{ ansible_env.HOME ~ '/.ssh/id_rsa_kolla' }} ceph_osd_store_type={{ 'filestore' if host == 'primary' else 'bluestore' }}
+{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user=kolla ansible_ssh_private_key_file={{ ansible_env.HOME ~ '/.ssh/id_rsa_kolla' }}
 {% endfor %}
 
 # Ceph-Ansible hosts
|
|||||||
{% endif %}
|
{% endif %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if scenario == "ceph" %}
|
|
||||||
enable_ceph: "yes"
|
|
||||||
enable_ceph_mds: "yes"
|
|
||||||
enable_ceph_rgw: "yes"
|
|
||||||
enable_ceph_rgw_keystone: "yes"
|
|
||||||
enable_ceph_nfs: "yes"
|
|
||||||
enable_cinder: "yes"
|
|
||||||
ceph_pool_pg_num: 8
|
|
||||||
ceph_pool_pgp_num: 8
|
|
||||||
{% if address_family == 'ipv6' %}
|
|
||||||
# NOTE(yoctozepto): ceph won't accept IPv6 address as hostname (due to ':', '.' were fine)
|
|
||||||
# hence use inventory name as the others do
|
|
||||||
# this is Train feature so would fail upgrades from Stein
|
|
||||||
ceph_mon_host_type: "INVENTORY"
|
|
||||||
ceph_osd_host_type: "INVENTORY"
|
|
||||||
{% endif %}
|
|
||||||
# This is experimental feature, disable if gate fail.
|
|
||||||
# In multinode jobs without ceph rolling upgrade fails.
|
|
||||||
glance_enable_rolling_upgrade: "yes"
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if scenario == "zun" %}
|
{% if scenario == "zun" %}
|
||||||
enable_zun: "yes"
|
enable_zun: "yes"
|
||||||
enable_kuryr: "yes"
|
enable_kuryr: "yes"
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
|
|
||||||
[storage]
|
[storage]
|
||||||
{% for host in hostvars %}
|
{% for host in hostvars %}
|
||||||
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user=kolla ansible_ssh_private_key_file={{ ansible_env.HOME ~ '/.ssh/id_rsa_kolla' }} ceph_osd_store_type={{ 'filestore' if host == 'primary' else 'bluestore' }}
|
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user=kolla ansible_ssh_private_key_file={{ ansible_env.HOME ~ '/.ssh/id_rsa_kolla' }}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
|
|
||||||
[monitoring]
|
[monitoring]
|
||||||
@ -156,8 +156,11 @@ control
|
|||||||
[murano:children]
|
[murano:children]
|
||||||
control
|
control
|
||||||
|
|
||||||
|
{# TODO(mnasiadka): Remove in Victoria #}
|
||||||
|
{% if scenario == 'ceph-ansible' and is_previous_release and previous_release == 'train' %}
|
||||||
[ceph:children]
|
[ceph:children]
|
||||||
control
|
control
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
[ironic:children]
|
[ironic:children]
|
||||||
control
|
control
|
||||||
@ -321,6 +324,8 @@ neutron
|
|||||||
[ironic-neutron-agent:children]
|
[ironic-neutron-agent:children]
|
||||||
neutron
|
neutron
|
||||||
|
|
||||||
|
{# TODO(mnasiadka): Remove in Victoria #}
|
||||||
|
{% if scenario == 'ceph-ansible' and is_previous_release and previous_release == 'train' %}
|
||||||
# Ceph
|
# Ceph
|
||||||
[ceph-mds:children]
|
[ceph-mds:children]
|
||||||
ceph
|
ceph
|
||||||
@ -339,6 +344,7 @@ ceph
|
|||||||
|
|
||||||
[ceph-osd:children]
|
[ceph-osd:children]
|
||||||
storage
|
storage
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
# Cinder
|
# Cinder
|
||||||
[cinder-api:children]
|
[cinder-api:children]
|
||||||
|
@@ -11,7 +11,7 @@ function test_smoke {
     openstack --debug compute service list
     openstack --debug network agent list
     openstack --debug orchestration service list
-    if [[ $SCENARIO == "ceph" ]] || [[ $SCENARIO == "ceph-ansible" ]] || [[ $SCENARIO == "zun" ]]; then
+    if [[ $SCENARIO == "ceph-ansible" ]] || [[ $SCENARIO == "zun" ]]; then
         openstack --debug volume service list
     fi
 }
|
|||||||
fi
|
fi
|
||||||
echo "SUCCESS: Server creation"
|
echo "SUCCESS: Server creation"
|
||||||
|
|
||||||
if [[ $SCENARIO == "ceph" ]] || [ $SCENARIO == "ceph-ansible" ] || [[ $SCENARIO == "zun" ]]; then
|
if [[ $SCENARIO == "ceph-ansible" ]] || [[ $SCENARIO == "zun" ]]; then
|
||||||
echo "TESTING: Cinder volume attachment"
|
echo "TESTING: Cinder volume attachment"
|
||||||
openstack volume create --size 2 test_volume
|
openstack volume create --size 2 test_volume
|
||||||
attempt=1
|
attempt=1
|
||||||
|
@ -37,14 +37,6 @@ fi
|
|||||||
echo "Creating a fstab backup..."
|
echo "Creating a fstab backup..."
|
||||||
sudo cp /etc/fstab /etc/fstab_backup
|
sudo cp /etc/fstab /etc/fstab_backup
|
||||||
|
|
||||||
echo "Unmounting Ceph OSD disks"
|
|
||||||
for mount in $(mount | awk '/\/var\/lib\/ceph/ { print $3 }'); do
|
|
||||||
umount $mount
|
|
||||||
done
|
|
||||||
|
|
||||||
echo "Removing ceph references from fstab..."
|
|
||||||
sudo sed -i '/\/var\/lib\/ceph\/osd\//d' /etc/fstab
|
|
||||||
|
|
||||||
if [[ "$enable_swift" == "yes" ]]; then
|
if [[ "$enable_swift" == "yes" ]]; then
|
||||||
echo "Removing swift references from fstab..."
|
echo "Removing swift references from fstab..."
|
||||||
cat /etc/fstab | grep "/srv/node/d*" | xargs umount
|
cat /etc/fstab | grep "/srv/node/d*" | xargs umount
|
||||||
|
@ -35,10 +35,6 @@ function setup_config {
|
|||||||
GATE_IMAGES="bifrost"
|
GATE_IMAGES="bifrost"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ $SCENARIO == "ceph" ]]; then
|
|
||||||
GATE_IMAGES+=",^ceph,^cinder"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $SCENARIO == "ceph-ansible" ]]; then
|
if [[ $SCENARIO == "ceph-ansible" ]]; then
|
||||||
GATE_IMAGES+=",^cinder"
|
GATE_IMAGES+=",^cinder"
|
||||||
fi
|
fi
|
||||||
|
@ -150,8 +150,7 @@ def check_docker_become():
|
|||||||
for x in YAML_INCLUDE_PATTERNS])
|
for x in YAML_INCLUDE_PATTERNS])
|
||||||
excludes = r'|'.join([fnmatch.translate(x)
|
excludes = r'|'.join([fnmatch.translate(x)
|
||||||
for x in YAML_EXCLUDE_PATTERNS])
|
for x in YAML_EXCLUDE_PATTERNS])
|
||||||
docker_modules = ('kolla_docker', 'kolla_ceph_keyring',
|
docker_modules = ('kolla_docker', 'kolla_container_facts', 'kolla_toolbox')
|
||||||
'kolla_container_facts', 'kolla_toolbox')
|
|
||||||
cmd_modules = ('command', 'shell')
|
cmd_modules = ('command', 'shell')
|
||||||
return_code = 0
|
return_code = 0
|
||||||
roles_path = os.path.join(PROJECT_ROOT, 'ansible', 'roles')
|
roles_path = os.path.join(PROJECT_ROOT, 'ansible', 'roles')
|
||||||
|
@ -76,24 +76,6 @@
|
|||||||
base_distro: ubuntu
|
base_distro: ubuntu
|
||||||
install_type: binary
|
install_type: binary
|
||||||
|
|
||||||
- job:
|
|
||||||
name: kolla-ansible-ubuntu-source-ceph
|
|
||||||
parent: kolla-ansible-ceph-base
|
|
||||||
nodeset: kolla-ansible-bionic-multi
|
|
||||||
timeout: 9000
|
|
||||||
vars:
|
|
||||||
base_distro: ubuntu
|
|
||||||
install_type: source
|
|
||||||
|
|
||||||
- job:
|
|
||||||
name: kolla-ansible-centos-source-ceph
|
|
||||||
parent: kolla-ansible-ceph-base
|
|
||||||
nodeset: kolla-ansible-centos-multi
|
|
||||||
timeout: 9000
|
|
||||||
vars:
|
|
||||||
base_distro: centos
|
|
||||||
install_type: source
|
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: kolla-ansible-centos-source-ceph-ansible
|
name: kolla-ansible-centos-source-ceph-ansible
|
||||||
parent: kolla-ansible-ceph-ansible-base
|
parent: kolla-ansible-ceph-ansible-base
|
||||||
|
@ -16,8 +16,6 @@
|
|||||||
- kolla-ansible-debian-source
|
- kolla-ansible-debian-source
|
||||||
- kolla-ansible-ubuntu-source
|
- kolla-ansible-ubuntu-source
|
||||||
- kolla-ansible-ubuntu-source-multinode-ipv6
|
- kolla-ansible-ubuntu-source-multinode-ipv6
|
||||||
- kolla-ansible-ubuntu-source-ceph
|
|
||||||
- kolla-ansible-centos-source-ceph
|
|
||||||
- kolla-ansible-bifrost-centos-source
|
- kolla-ansible-bifrost-centos-source
|
||||||
# FIXME(mgoddard): Bifrost CentOS 8 support in progress.
|
# FIXME(mgoddard): Bifrost CentOS 8 support in progress.
|
||||||
# - kolla-ansible-centos8-source-bifrost
|
# - kolla-ansible-centos8-source-bifrost
|
||||||
@ -40,8 +38,6 @@
|
|||||||
- kolla-ansible-ubuntu-source-ironic
|
- kolla-ansible-ubuntu-source-ironic
|
||||||
- kolla-ansible-centos-source-upgrade
|
- kolla-ansible-centos-source-upgrade
|
||||||
- kolla-ansible-ubuntu-source-upgrade
|
- kolla-ansible-ubuntu-source-upgrade
|
||||||
- kolla-ansible-centos-source-upgrade-ceph
|
|
||||||
- kolla-ansible-ubuntu-source-upgrade-ceph
|
|
||||||
- kolla-ansible-centos-binary
|
- kolla-ansible-centos-binary
|
||||||
- kolla-ansible-centos8-binary
|
- kolla-ansible-centos8-binary
|
||||||
- kolla-ansible-ubuntu-binary
|
- kolla-ansible-ubuntu-binary
|
||||||
@ -80,7 +76,5 @@
|
|||||||
- kolla-ansible-ubuntu-source-ironic
|
- kolla-ansible-ubuntu-source-ironic
|
||||||
- kolla-ansible-centos-source-upgrade
|
- kolla-ansible-centos-source-upgrade
|
||||||
- kolla-ansible-ubuntu-source-upgrade
|
- kolla-ansible-ubuntu-source-upgrade
|
||||||
- kolla-ansible-centos-source-upgrade-ceph
|
|
||||||
- kolla-ansible-ubuntu-source-upgrade-ceph
|
|
||||||
- kolla-ansible-centos-source-mariadb
|
- kolla-ansible-centos-source-mariadb
|
||||||
- kolla-ansible-ubuntu-source-mariadb
|
- kolla-ansible-ubuntu-source-mariadb
|
||||||