add multinode functional test to swift

This new test job adds multinode testing to swift.
It currently configures 5 nodes: test-runner, proxy,
account, container and object. It starts by checking out
the latest release and then 'upgrades' the storage nodes
to the current patch under test. That means that the
test-runner and the proxy are still running the latest
release.

Future work would include a new set of tests where it will
add objects to the cluster prior to upgrading and then try
to read those objects after the upgrade. It should also be
noted that the 'upgrade' currently only means a new
'git checkout'; the configuration files are not being updated.
A future patch should fix that too.

Change-Id: If8e09a082fc024257a98cf332de9a36a18d2adc5
This commit is contained in:
Thiago da Silva 2018-09-11 16:28:55 -04:00
parent 956172623c
commit 5d70396247
11 changed files with 569 additions and 4 deletions

View File

@ -181,7 +181,7 @@
description: | description: |
Setup a SAIO dev environment and run ceph-s3tests Setup a SAIO dev environment and run ceph-s3tests
pre-run: pre-run:
- tools/playbooks/saio_single_node_setup/install_dependencies.yaml - tools/playbooks/common/install_dependencies.yaml
- tools/playbooks/saio_single_node_setup/setup_saio.yaml - tools/playbooks/saio_single_node_setup/setup_saio.yaml
- tools/playbooks/saio_single_node_setup/make_rings.yaml - tools/playbooks/saio_single_node_setup/make_rings.yaml
run: tools/playbooks/ceph-s3tests/run.yaml run: tools/playbooks/ceph-s3tests/run.yaml
@ -197,12 +197,75 @@
Setup a SAIO dev environment and run Swift's probe tests Setup a SAIO dev environment and run Swift's probe tests
timeout: 3600 timeout: 3600
pre-run: pre-run:
- tools/playbooks/saio_single_node_setup/install_dependencies.yaml - tools/playbooks/common/install_dependencies.yaml
- tools/playbooks/saio_single_node_setup/setup_saio.yaml - tools/playbooks/saio_single_node_setup/setup_saio.yaml
- tools/playbooks/saio_single_node_setup/make_rings.yaml - tools/playbooks/saio_single_node_setup/make_rings.yaml
run: tools/playbooks/probetests/run.yaml run: tools/playbooks/probetests/run.yaml
post-run: tools/playbooks/probetests/post.yaml post-run: tools/playbooks/probetests/post.yaml
# Five-node nodeset for multinode testing: one test-runner plus a
# four-node swift cluster (proxy, account, container, object).
- nodeset:
    name: swift-five-nodes
    nodes:
      - name: test-runner1
        label: centos-7
      - name: proxy1
        label: centos-7
      - name: account1
        label: centos-7
      - name: container1
        label: centos-7
      - name: object1
        label: centos-7
    groups:
      # Node that drives the functional tests against the cluster.
      - name: test-runner
        nodes:
          - test-runner1
      # All nodes that run swift services.
      - name: swift-cluster
        nodes:
          - proxy1
          - account1
          - container1
          - object1
      - name: proxy
        nodes:
          - proxy1
      - name: account
        nodes:
          - account1
      - name: container
        nodes:
          - container1
      - name: object
        nodes:
          - object1
      # Backend nodes only (no proxy); these are the nodes that get
      # 'upgraded' to the patch under test.
      - name: storage
        nodes:
          - account1
          - container1
          - object1
- job:
    name: swift-multinode-rolling-upgrade
    parent: multinode
    nodeset: swift-five-nodes
    description: |
      Build a 4-node swift cluster (plus a separate test-runner
      node) and run functional tests against it.
    timeout: 3600
    pre-run:
      - tools/playbooks/multinode_setup/pre.yaml
      - tools/playbooks/common/install_dependencies.yaml
      - tools/playbooks/multinode_setup/configure_loopback.yaml
      - tools/playbooks/multinode_setup/common_config.yaml
      - tools/playbooks/multinode_setup/make_rings.yaml
    run: tools/playbooks/multinode_setup/run.yaml
    post-run: tools/playbooks/probetests/post.yaml

# Same rolling-upgrade job, but starting from stable/queens instead of
# the most recent release tag.
- job:
    name: swift-multinode-rolling-upgrade-queens
    parent: swift-multinode-rolling-upgrade
    vars:
      previous_swift_version: origin/stable/queens
- project: - project:
templates: templates:
- publish-openstack-docs-pti - publish-openstack-docs-pti
@ -280,6 +343,11 @@
- ^(api-ref|etc|examples|releasenotes)/.*$ - ^(api-ref|etc|examples|releasenotes)/.*$
# Keep doc/manpages -- we want to syntax check them # Keep doc/manpages -- we want to syntax check them
- ^doc/(requirements.txt|(saio|s3api|source)/.*)$ - ^doc/(requirements.txt|(saio|s3api|source)/.*)$
- swift-multinode-rolling-upgrade:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
gate: gate:
jobs: jobs:
# For gate jobs, err towards running more jobs (so, generally avoid # For gate jobs, err towards running more jobs (so, generally avoid
@ -322,6 +390,7 @@
- swift-tox-func-centos-7 - swift-tox-func-centos-7
- swift-tox-func-encryption-centos-7 - swift-tox-func-encryption-centos-7
- swift-tox-func-ec-centos-7 - swift-tox-func-ec-centos-7
- swift-multinode-rolling-upgrade-queens
post: post:
jobs: jobs:

View File

@ -22,4 +22,8 @@
- python-pyeclib - python-pyeclib
- python-nose - python-nose
- python-swiftclient - python-swiftclient
become: true
- name: install python modules with pip
pip: name={{ item }} state=present extra_args='--upgrade'
with_items:
- crudini

View File

@ -0,0 +1,75 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Common host configuration applied to every node running swift services:
# runtime directories, rsyslog, selinux, and a clean /etc/swift.
- hosts: swift-cluster
  become: true
  tasks:
    - name: create /var/run/swift
      file:
        path: '/var/run/swift'
        owner: '{{ ansible_user_id }}'
        group: '{{ ansible_user_gid }}'
        state: directory

    - name: create /var/cache/swift
      file:
        path: '/var/cache/swift'
        owner: '{{ ansible_user_id }}'
        group: '{{ ansible_user_gid }}'
        state: directory

    # rc.local recreates the directories above on reboot (see rc.local.j2).
    - name: create rc.local from template
      template: src=rc.local.j2 dest=/etc/rc.d/rc.local owner=root group=root mode=0755

    # NOTE: state=disabled turns selinux off entirely, not just permissive.
    - name: disable selinux
      selinux: policy=targeted state=disabled

    - name: configure rsyslog
      command: cp {{ zuul.project.src_dir }}/doc/saio/rsyslog.d/10-swift.conf /etc/rsyslog.d/

    - name: modify /etc/rsyslog.conf
      lineinfile: dest=/etc/rsyslog.conf
        line="$PrivDropToGroup adm"
        create=yes
        insertafter="^#### GLOBAL DIRECTIVES"

    - name: assure /var/log/swift directory exists
      file:
        path: '/var/log/swift'
        state: directory
        owner: root
        group: adm
        mode: 'g+wt'

    - name: restart rsyslog
      service: name=rsyslog state=restarted enabled=yes

    # Start from an empty /etc/swift so stale configs/rings from the image
    # cannot leak into the test.
    - name: clean up /etc/swift directory
      file:
        path: '/etc/swift'
        state: absent

    - name: create /etc/swift directory
      file:
        path: '/etc/swift'
        state: directory
        owner: '{{ ansible_user_id }}'
        group: '{{ ansible_user_gid }}'

- hosts: test-runner
  tasks:
    - name: add new env. variable for running tests
      lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="export SWIFT_TEST_CONFIG_FILE=/home/{{ ansible_ssh_user }}/test.conf"

    - name: copy the sample configuration files for running tests
      template: src=test.conf.j2 dest=/home/{{ ansible_ssh_user }}/test.conf

View File

@ -0,0 +1,64 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Give each storage node a SAIO-style loopback XFS disk mounted at
# /mnt/sdb1, with /srv/1/node/sdb1..sdb8 as the ring device directories.
- hosts: storage
  become: true
  tasks:
    - name: assure /srv directory exists
      file:
        path: '/srv'
        state: directory

    # 'creates=' makes this idempotent across reruns.
    - name: create loopback device
      command: truncate -s 1GB /srv/swift-disk creates=/srv/swift-disk

    - name: create filesystem /srv/swift-disk
      become: true
      filesystem: fstype=xfs dev=/srv/swift-disk

    - name: create mount path /mnt/sdb1
      file:
        path: '/mnt/sdb1'
        state: directory

    - name: mount /mnt/sdb1
      mount: name=/mnt/sdb1 src=/srv/swift-disk fstype=xfs opts="loop,noatime,nodiratime,nobarrier,logbufs=8" dump=0 passno=0 state=mounted

    - name: create sub-partitions
      file:
        path: '/mnt/sdb1/{{ item }}'
        state: directory
        owner: '{{ ansible_user_id }}'
        group: '{{ ansible_user_gid }}'
      with_items:
        - 1

    - name: create symlinks
      become: true
      file:
        src: '/mnt/sdb1/{{ item }}'
        dest: '/srv/{{ item }}'
        owner: '{{ ansible_user_id }}'
        group: '{{ ansible_user_gid }}'
        state: link
      with_items:
        - 1

    - name: create node partition directories
      file:
        path: '/srv/1/node/sdb{{ item }}'
        owner: '{{ ansible_user_id }}'
        group: '{{ ansible_user_gid }}'
        state: directory
      with_items:
        - [1, 2, 3, 4, 5, 6, 7, 8]

View File

@ -0,0 +1,120 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Check out the 'previous' swift on every node and install it. The storage
# nodes are later upgraded to the patch under test (see run.yaml).
- hosts: all
  tasks:
    - name: get latest release
      shell:
        cmd: git describe --abbrev=0
        executable: /bin/bash
        chdir: '{{ zuul.project.src_dir }}'
      register: latest_swift_release

    # BUG FIX: the checkout previously referenced the misspelled variable
    # 'previous_swift_verion', so a job-supplied previous_swift_version
    # (e.g. origin/stable/queens) was silently ignored and the default
    # (latest release tag) was always used.
    - name: "checkout a previous version: {{ previous_swift_version | default(latest_swift_release.stdout) }}"
      shell:
        cmd: git checkout {{ previous_swift_version | default(latest_swift_release.stdout) }} -b previous_swift_version
        executable: /bin/bash
        chdir: '{{ zuul.project.src_dir }}'

    - name: install swift
      become: true
      shell:
        cmd: python setup.py develop
        executable: /bin/bash
        chdir: '{{ zuul.project.src_dir }}'
# Per-service configuration: copy the SAIO sample config to each node and
# point bind_ip/user at that node via crudini.
- hosts: proxy
  become: true
  tasks:
    - name: start memcache
      service: name=memcached state=started enabled=yes

    - name: copy proxy-server.conf file
      command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift/proxy-server.conf /etc/swift

    - name: set the options in the proxy config file
      shell:
        cmd: |
          crudini --set /etc/swift/proxy-server.conf DEFAULT bind_ip {{ hostvars['proxy1'].nodepool.public_ipv4 }}
          crudini --set /etc/swift/proxy-server.conf DEFAULT user {{ ansible_user_id }}
        executable: /bin/bash

- hosts: account
  become: true
  tasks:
    - name: copy account-server.conf file
      command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift/account-server/1.conf /etc/swift/account-server.conf

    - name: set the options in the account config file
      shell:
        cmd: |
          crudini --set /etc/swift/account-server.conf DEFAULT bind_ip {{ hostvars['account1'].nodepool.public_ipv4 }}
          crudini --set /etc/swift/account-server.conf DEFAULT user {{ ansible_user_id }}
        executable: /bin/bash

- hosts: container
  become: true
  tasks:
    - name: copy container-server.conf file
      command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift/container-server/1.conf /etc/swift/container-server.conf

    - name: set the options in the container config file
      shell:
        cmd: |
          crudini --set /etc/swift/container-server.conf DEFAULT bind_ip {{ hostvars['container1'].nodepool.public_ipv4 }}
          crudini --set /etc/swift/container-server.conf DEFAULT user {{ ansible_user_id }}
        executable: /bin/bash

- hosts: object
  become: true
  tasks:
    - name: copy object-server.conf file
      command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift/object-server/1.conf /etc/swift/object-server.conf

    - name: set the options in the object config file
      shell:
        cmd: |
          crudini --set /etc/swift/object-server.conf DEFAULT bind_ip {{ hostvars['object1'].nodepool.public_ipv4 }}
          crudini --set /etc/swift/object-server.conf DEFAULT user {{ ansible_user_id }}
        executable: /bin/bash

- hosts: swift-cluster
  become: true
  tasks:
    - name: copy swift.conf
      command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift/swift.conf /etc/swift

    - name: set correct ownership of /etc/swift
      file: path=/etc/swift owner={{ ansible_user_id }} group={{ ansible_user_gid }} recurse=yes

# Build the rings on the test-runner and push them to every cluster node.
- hosts: test-runner
  tasks:
    - name: create remakerings from template
      template: src=make_multinode_rings.j2 dest=/home/{{ ansible_ssh_user }}/make_multinode_rings mode=0755

    - name: create rings dir
      file: >
        path=/home/{{ ansible_ssh_user }}/rings
        state=directory

    - name: make rings
      shell:
        cmd: /home/{{ ansible_ssh_user }}/make_multinode_rings
        executable: /bin/bash

    # scp each ring to each swift-cluster node (cartesian product).
    - name: scp rings to all swift-cluster nodes
      command: scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=5 -o ConnectionAttempts=360 /home/{{ ansible_ssh_user }}/rings/{{ item[0] }} {{ ansible_ssh_user }}@{{ hostvars[item[1]].nodepool.public_ipv4 }}:/etc/swift
      with_nested:
        - ['account.ring.gz', 'container.ring.gz', 'object.ring.gz', 'object-1.ring.gz', 'object-2.ring.gz']
        - "{{ groups['swift-cluster'] }}"

View File

@ -0,0 +1,8 @@
# Prepare every node before the swift-specific setup playbooks run.
- hosts: all
  roles:
    # bindep installs distro packages for the 'test' profile; test-setup
    # and ensure-tox provide the standard Zuul test environment.
    - role: bindep
      bindep_profile: test
      bindep_dir: "{{ zuul_work_dir }}"
    - test-setup
    - ensure-tox

View File

@ -0,0 +1,42 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 'Upgrade' only the storage nodes to the patch under test (master), then
# run functional tests from the test-runner against the mixed-version
# cluster; the proxy keeps running the previous release.
- hosts: storage
  tasks:
    - name: checkout master swift
      shell:
        cmd: git checkout master
        executable: /bin/bash
        chdir: '{{ zuul.project.src_dir }}'

    - name: install swift
      become: true
      shell:
        cmd: python setup.py develop
        executable: /bin/bash
        chdir: '{{ zuul.project.src_dir }}'

- hosts: swift-cluster
  tasks:
    - name: start services
      command: swift-init main start

- hosts: test-runner
  tasks:
    - name: Run func tests with tempauth users
      include_role:
        name: tox
      vars:
        tox_envlist: func
        tox_environment:
          SWIFT_TEST_CONFIG_FILE: /home/{{ ansible_ssh_user }}/test.conf

View File

@ -0,0 +1,38 @@
#!/bin/bash
# Jinja2 template rendered onto the test-runner node and executed there to
# build the account/container/object rings for the multinode cluster.
# Devices sdb1..sdb8 map to the directories created by configure_loopback.
# swift-ring-builder args: create <part_power> <replicas> <min_part_hours>.
set -e
cd /home/{{ ansible_ssh_user }}/rings
# Start from a clean slate in case of a rerun.
rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
# Default object ring: 3 replicas across sdb1-3 on the object node.
swift-ring-builder object.builder create 10 3 1
swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb1 1
swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb2 1
swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb3 1
swift-ring-builder object.builder rebalance
# Policy-1 ring: 2 replicas.
swift-ring-builder object-1.builder create 10 2 1
swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb1 1
swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb2 1
swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb3 1
swift-ring-builder object-1.builder rebalance
# Policy-2 ring: 6 replicas across all eight devices (EC-style layout).
swift-ring-builder object-2.builder create 10 6 1
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb1 1
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb2 1
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb3 1
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb4 1
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb5 1
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb6 1
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb7 1
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb8 1
swift-ring-builder object-2.builder rebalance
# Container ring on the container node (port 6011).
swift-ring-builder container.builder create 10 3 1
swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6011/sdb1 1
swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6011/sdb2 1
swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6011/sdb3 1
swift-ring-builder container.builder rebalance
# Account ring on the account node (port 6012).
swift-ring-builder account.builder create 10 3 1
swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6012/sdb1 1
swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6012/sdb2 1
swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6012/sdb3 1
swift-ring-builder account.builder rebalance

View File

@ -0,0 +1,8 @@
#!/bin/bash
# Jinja2 template installed as /etc/rc.d/rc.local (mode 0755) by
# common_config.yaml: recreates swift's runtime directories with the
# correct ownership on every boot, since /var/run is typically tmpfs.
mkdir -p /var/cache/swift
chown {{ ansible_user_id }}:{{ ansible_user_gid }} /var/cache/swift*
mkdir -p /var/run/swift
chown {{ ansible_user_id }}:{{ ansible_user_gid }} /var/run/swift
exit 0

View File

@ -0,0 +1,122 @@
[func_test]
# Sample config for Swift with tempauth
auth_host = {{ hostvars['proxy1'].nodepool.public_ipv4 }}
auth_port = 8080
auth_ssl = no
auth_prefix = /auth/
# Sample config for Swift with Keystone v2 API.
# For keystone v2 change auth_version to 2 and auth_prefix to /v2.0/.
# And "allow_account_management" should not be set "true".
#auth_version = 3
#auth_host = localhost
#auth_port = 5000
#auth_ssl = no
#auth_prefix = /v3/
# Primary functional test account (needs admin access to the account)
account = test
username = tester
password = testing
s3_access_key = test:tester
s3_secret_key = testing
# User on a second account (needs admin access to the account)
account2 = test2
username2 = tester2
password2 = testing2
# User on same account as first, but without admin access
username3 = tester3
password3 = testing3
# s3api requires the same account with the primary one and different users
s3_access_key2 = test:tester3
s3_secret_key2 = testing3
# Fourth user is required for keystone v3 specific tests.
# Account must be in a non-default domain.
#account4 = test4
#username4 = tester4
#password4 = testing4
#domain4 = test-domain
# Fifth user is required for service token-specific tests.
# The account must be different from the primary test account.
# The user must not have a group (tempauth) or role (keystoneauth) on
# the primary test account. The user must have a group/role that is unique
# and not given to the primary tester and is specified in the options
# <prefix>_require_group (tempauth) or <prefix>_service_roles (keystoneauth).
#account5 = test5
#username5 = tester5
#password5 = testing5
# The service_prefix option is used for service token-specific tests.
# If service_prefix or username5 above is not supplied, the tests are skipped.
# To set the value and enable the service token tests, look at the
# reseller_prefix option in /etc/swift/proxy-server.conf. There must be at
# least two prefixes. If not, add a prefix as follows (where we add SERVICE):
# reseller_prefix = AUTH, SERVICE
# The service_prefix must match the <prefix> used in <prefix>_require_group
# (tempauth) or <prefix>_service_roles (keystoneauth); for example:
# SERVICE_require_group = service
# SERVICE_service_roles = service
# Note: Do not enable service token tests if the first prefix in
# reseller_prefix is the empty prefix AND the primary functional test
# account contains an underscore.
#service_prefix = SERVICE
# Sixth user is required for access control tests.
# Account must have a role for reseller_admin_role(keystoneauth).
#account6 = test
#username6 = tester6
#password6 = testing6
collate = C
# Only necessary if a pre-existing server uses self-signed certificate
insecure = no
# Tests that are dependent on domain_remap middleware being installed also
# require one of the domain_remap storage_domain values to be specified here,
# otherwise those tests will be skipped.
storage_domain =
[unit_test]
fake_syslog = False
[probe_test]
# check_server_timeout = 30
# validate_rsync = false
[swift-constraints]
# The functional test runner will try to use the constraint values provided in
# the swift-constraints section of test.conf.
#
# If a constraint value does not exist in that section, or because the
# swift-constraints section does not exist, the constraints values found in
# the /info API call (if successful) will be used.
#
# If a constraint value cannot be found in the /info results, either because
# the /info API call failed, or a value is not present, the constraint value
# used will fall back to those loaded by the constraints module at time of
# import (which will attempt to load /etc/swift/swift.conf, see the
# swift.common.constraints module for more information).
#
# Note that the cluster must have "sane" values for the test suite to pass
# (for some definition of sane).
#
#max_file_size = 5368709122
#max_meta_name_length = 128
#max_meta_value_length = 256
#max_meta_count = 90
#max_meta_overall_size = 4096
#max_header_size = 8192
#extra_header_count = 0
#max_object_name_length = 1024
#container_listing_limit = 10000
#account_listing_limit = 10000
#max_account_name_length = 256
#max_container_name_length = 256
# Newer swift versions default to strict cors mode, but older ones were the
# opposite.
#strict_cors_mode = true

View File

@ -9,8 +9,23 @@
recurse: yes recurse: yes
- name: Copy swift logs from worker nodes to executor node - name: Copy swift logs from worker nodes to executor node
synchronize: synchronize:
src: '/var/log/swift' src: '/var/log/swift/'
dest: '{{ zuul.executor.log_root }}' dest: '{{ zuul.executor.log_root }}'
mode: pull mode: pull
copy_links: true copy_links: true
verify_host: true verify_host: true
failed_when: false
- name: Ensure swift configs are readable before syncing
file:
path: '/etc/swift'
mode: u=rwX,g=rX,o=rX
state: directory
recurse: yes
- name: Copy swift config from worker nodes to executor node
synchronize:
src: '/etc/swift/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
failed_when: false