diff --git a/ansible-role-requirements.yml b/ansible-role-requirements.yml index f43116a3f1..d751dbff62 100644 --- a/ansible-role-requirements.yml +++ b/ansible-role-requirements.yml @@ -261,8 +261,8 @@ - name: ceph-ansible scm: git src: https://github.com/ceph/ceph-ansible - version: stable-3.2 - trackbranch: stable-3.2 + version: stable-4.0 + trackbranch: stable-4.0 - name: opendaylight scm: git src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight diff --git a/inventory/group_vars/all/ceph.yml b/inventory/group_vars/all/ceph.yml index facf594ecb..0d36dd6fc7 100644 --- a/inventory/group_vars/all/ceph.yml +++ b/inventory/group_vars/all/ceph.yml @@ -25,7 +25,7 @@ ceph_repository: community # The _stable_release var is used by both the OSA ceph_client role and the # ceph-ansible roles. It is defaulted in ceph_client but set here to keep the # OSA/ceph-ansible integrations in sync. -ceph_stable_release: mimic +ceph_stable_release: nautilus fetch_directory: "{{ openstack_config_dir }}/ceph-fetch/" # tries to create /var/log/ceph as a directory and fails if the log link already # exists. 
we handle the log dir creation so this is not something we need diff --git a/inventory/group_vars/ceph-rgw.yml b/inventory/group_vars/ceph-rgw.yml index f7b7355d0b..2b2a0ec056 100644 --- a/inventory/group_vars/ceph-rgw.yml +++ b/inventory/group_vars/ceph-rgw.yml @@ -1,6 +1,6 @@ --- ceph_conf_overrides_rgw: - "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}": + "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}.rgw0": # OpenStack integration with Keystone rgw_keystone_url: "{{ keystone_service_adminuri }}" rgw_keystone_api_version: 3 @@ -12,4 +12,4 @@ ceph_conf_overrides_rgw: rgw_keystone_implicit_tenants: 'true' rgw_enable_apis: swift rgw_swift_account_in_url: 'true' - rgw_swift_versioning_enabled: 'true' \ No newline at end of file + rgw_swift_versioning_enabled: 'true' diff --git a/releasenotes/notes/ansible-2_8-ceph-nautilus-18b0a5e14a125e26.yaml b/releasenotes/notes/ansible-2_8-ceph-nautilus-18b0a5e14a125e26.yaml new file mode 100644 index 0000000000..a84024d204 --- /dev/null +++ b/releasenotes/notes/ansible-2_8-ceph-nautilus-18b0a5e14a125e26.yaml @@ -0,0 +1,18 @@ +--- +features: + - | + The ansible version used by OSA is updated from the 2.7 to the 2.8 series. + This requires an upgrade of ceph-ansible to 4.0 and this in turn + requires an upgrade of ceph from Mimic to Nautilus. This version dependency + applies where OSA uses ceph-ansible directly to deploy the ceph infrastructure, + but not when OSA is integrated with an externally provisioned ceph cluster. +upgrade: + - | + Any ceph infrastructure components (OSDs, MONs etc) deployed using the + OSA/ceph-ansible tooling will be upgraded to the Ceph Nautilus release. + Deployers should verify that this upgrade is suitable for their environment + before commencing a major upgrade to Train, and consult the ceph-ansible + and ceph release notes for Nautilus. 
For integration with external ceph + clusters where OSA does not deploy any of the ceph cluster infrastructure, + overrides can be used to select the specific version of ceph repositories + used by the OSA ceph_client ansible role. diff --git a/scripts/bootstrap-ansible.sh b/scripts/bootstrap-ansible.sh index eae9188a9e..d2c38b5e40 100755 --- a/scripts/bootstrap-ansible.sh +++ b/scripts/bootstrap-ansible.sh @@ -22,7 +22,7 @@ set -e -u -x export HTTP_PROXY=${HTTP_PROXY:-""} export HTTPS_PROXY=${HTTPS_PROXY:-""} # The Ansible version used for testing -export ANSIBLE_PACKAGE=${ANSIBLE_PACKAGE:-"ansible==2.7.10"} +export ANSIBLE_PACKAGE=${ANSIBLE_PACKAGE:-"ansible==2.8.2"} export ANSIBLE_ROLE_FILE=${ANSIBLE_ROLE_FILE:-"ansible-role-requirements.yml"} export SSH_DIR=${SSH_DIR:-"/root/.ssh"} export DEBIAN_FRONTEND=${DEBIAN_FRONTEND:-"noninteractive"} diff --git a/tests/roles/bootstrap-host/tasks/prepare_ceph.yml b/tests/roles/bootstrap-host/tasks/prepare_ceph.yml index f74592c9c0..3c91712900 100644 --- a/tests/roles/bootstrap-host/tasks/prepare_ceph.yml +++ b/tests/roles/bootstrap-host/tasks/prepare_ceph.yml @@ -62,13 +62,34 @@ mode: "0777" with_items: "{{ ceph_create_loopback.results }}" +# NOTE(jrosser) ceph-volume is unwilling to automatically create OSD +# directly on loop devices - see http://tracker.ceph.com/issues/36603 +# Work around this with manual LVM creation and the advanced lvm OSD +# scenario +- name: Create LVM VG + lvg: + vg: "vg-{{ item.stdout | basename }}" + pvs: "{{ item.stdout }}" + loop: "{{ ceph_create_loopback.results }}" + +- name: Create LVM LV + lvol: + lv: "lv-{{ item.stdout | basename }}" + vg: "vg-{{ item.stdout | basename }}" + size: 100%FREE + loop: "{{ ceph_create_loopback.results }}" + # TODO(logan): Move these vars to user_variables.ceph.yml.j2 once LP #1649381 # is fixed and eliminate this task. 
- name: Write ceph cluster config copy: content: | --- - devices: {{ ceph_create_loopback.results | map(attribute='stdout') | list | to_yaml | trim }} + lvm_volumes: + {% for d in ceph_create_loopback | json_query('results[].stdout') %} + - data_vg: vg-{{ d | basename }} + data: lv-{{ d | basename }} + {% endfor %} cinder_backends: "RBD": volume_driver: cinder.volume.drivers.rbd.RBDDriver