Major Hayden 05ae112e20 Update cached LXC image in place
The LXC container creation playbook is one of the longest-running playbooks
in the repository.  It generally takes 15-17 minutes to run during the gate
jobs.  Much of this time is spent updating each container with the latest
packages.

This patch causes the LXC cached image to be updated once before that
image is used for all of the containers. This reduces the number of
times the updates actually run, which shortens the time it takes to
complete the playbook.

The updates to the cached image will only occur if a new cache image has
just been downloaded.

Partial-bug: 1489169

Change-Id: Iba64f9a3aeb999907088f2a99e8904700074550b
2015-09-21 17:10:52 +00:00
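A minimal sketch of the approach described above, assuming hypothetical task names, cache paths, and variable wiring (the actual tasks live in the LXC host playbooks): the cache image is downloaded, and the package update runs inside a chroot of that image only when a new image was actually fetched, so every container is then built from an already-updated rootfs.

- name: Download the base LXC image cache
  get_url:
    url: "{{ item.url }}"
    dest: "/var/cache/lxc/{{ item.name }}"
    sha256sum: "{{ item.sha256sum }}"
  with_items: lxc_container_caches
  register: lxc_cache_download

# The step that unpacks the tarball into item.chroot_path is omitted here.
- name: Update packages inside the cached image
  command: >
    chroot /var/cache/lxc/{{ item.item.chroot_path }}
    /bin/bash -c "apt-get update && apt-get -y upgrade"
  with_items: lxc_cache_download.results
  when: item | changed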


---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Verbosity Options
debug: False
verbose: True
## SSH connection wait time
ssh_delay: 5
## Repo server
repo_service_user_name: nginx
repo_service_home_folder: /var/www
repo_server_port: 8181
repo_pip_default_index: "https://rpc-repo.rackspace.com/pools"
## Rsyslog server
rsyslog_server_storage_directory: /var/log/log-storage
## OpenStack source options
# URL for the frozen internal openstack repo.
openstack_repo_url: "http://{{ internal_lb_vip_address }}:{{ repo_server_port }}"
## LXC options
lxc_container_caches:
- url: "{{ repo_pip_default_index | netorigin }}/container_images/rpc-trusty-container.tgz"
name: "trusty.tgz"
sha256sum: "56c6a6e132ea7d10be2f3e8104f47136ccf408b30e362133f0dc4a0a9adb4d0c"
chroot_path: trusty/rootfs-amd64
## RabbitMQ
rabbitmq_cluster_name: openstack
rabbitmq_port: 5672
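# Builds a comma-separated host:port list from the rabbitmq_all group,
# e.g. "10.0.3.11:5672,10.0.3.12:5672,10.0.3.13:5672" for a three-node cluster.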
rabbitmq_servers: "{% for host in groups['rabbitmq_all'] %}{{ hostvars[host]['ansible_ssh_host'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}"
## Galera
galera_wsrep_cluster_address: "{% for host in groups['galera_all'] %}{{ hostvars[host]['ansible_ssh_host'] }}{% if not loop.last %},{% endif %}{% endfor %}"
galera_wsrep_address: "{{ ansible_ssh_host }}"
## Pip install
# Lock down pip to only a specific version of pip
pip_get_pip_options: >
  --no-index
  --find-links="{{ openstack_repo_url }}/os-releases/{{ openstack_release }}"
  --trusted-host {{ openstack_repo_url | netloc_no_port }}
## Memcached options
memcached_listen: "{{ ansible_ssh_host }}"
memcached_port: 11211
memcached_servers: "{% for host in groups['memcached'] %}{{ hostvars[host]['ansible_ssh_host'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}"
## Region Name
service_region: RegionOne
## DHCP Domain Name
dhcp_domain: openstacklocal
## OpenStack global Endpoint Protos
#openstack_service_publicuri_proto: http
#openstack_service_adminuri_proto: http
#openstack_service_internaluri_proto: http
## Ceilometer
ceilometer_service_port: 8777
ceilometer_service_proto: http
ceilometer_service_user_name: ceilometer
ceilometer_service_tenant_name: service
ceilometer_service_adminuri: "{{ ceilometer_service_proto }}://{{ internal_lb_vip_address }}:{{ ceilometer_service_port }}"
ceilometer_service_adminurl: "{{ ceilometer_service_adminuri }}/"
ceilometer_service_region: "{{ service_region }}"
ceilometer_rabbitmq_userid: ceilometer
ceilometer_rabbitmq_vhost: /ceilometer
## Nova
nova_service_port: 8774
nova_service_proto: http
nova_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(nova_service_proto) }}"
nova_service_user_name: nova
nova_service_project_name: service
nova_service_project_domain_id: default
nova_service_user_domain_id: default
nova_service_adminuri: "{{ nova_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ nova_service_port }}"
nova_service_adminurl: "{{ nova_service_adminuri }}/v2/%(tenant_id)s"
nova_service_region: "{{ service_region }}"
nova_metadata_port: 8775
nova_keystone_auth_plugin: password
nova_ceph_client: '{{ cinder_ceph_client }}'
nova_ceph_client_uuid: '{{ cinder_ceph_client_uuid | default() }}'
nova_dhcp_domain: "{{ dhcp_domain }}"
## Neutron
neutron_service_port: 9696
neutron_service_proto: http
neutron_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(neutron_service_proto) }}"
neutron_service_user_name: neutron
neutron_service_project_name: service
neutron_service_project_domain_id: default
neutron_service_user_domain_id: default
neutron_service_adminuri: "{{ neutron_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ neutron_service_port }}"
neutron_service_adminurl: "{{ neutron_service_adminuri }}"
neutron_service_region: "{{ service_region }}"
neutron_service_program_enabled: true
neutron_service_dhcp_program_enabled: true
neutron_service_l3_program_enabled: true
neutron_service_linuxbridge_program_enabled: true
neutron_service_metadata_program_enabled: true
neutron_service_metering_program_enabled: true
neutron_dhcp_domain: "{{ dhcp_domain }}"
## Glance
glance_service_port: 9292
glance_service_proto: http
glance_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(glance_service_proto) }}"
glance_service_user_name: glance
glance_service_project_name: service
glance_service_project_domain_id: default
glance_service_user_domain_id: default
glance_service_adminurl: "{{ glance_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ glance_service_port }}"
glance_service_region: "{{ service_region }}"
# Only specify this if you want to list the servers - by default LB host/port will be used
#glance_api_servers: "{% for host in groups['glance_all'] %}{{ hostvars[host]['container_address'] }}:{{ glance_service_port }}{% if not loop.last %},{% endif %}{% endfor %}"
## Keystone
keystone_admin_user_name: admin
keystone_admin_tenant_name: admin
keystone_admin_port: 35357
keystone_service_port: 5000
keystone_service_proto: http
keystone_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(keystone_service_proto) }}"
keystone_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(keystone_service_proto) }}"
keystone_service_publicuri_proto: "{{ openstack_service_publicuri_proto | default(keystone_service_proto) }}"
keystone_service_user_name: keystone
keystone_service_tenant_name: service
keystone_service_publicuri: "{{ keystone_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ keystone_service_port }}"
keystone_service_publicurl: "{{ keystone_service_publicuri }}/v2.0"
keystone_service_internaluri: "{{ keystone_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ keystone_service_port }}"
keystone_service_internalurl: "{{ keystone_service_internaluri }}/v2.0"
keystone_service_adminuri: "{{ keystone_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ keystone_admin_port }}"
keystone_service_adminurl: "{{ keystone_service_adminuri }}/v2.0"
keystone_service_publicuri_v3: "{{ keystone_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ keystone_service_port }}"
keystone_service_publicurl_v3: "{{ keystone_service_publicuri_v3 }}/v3"
keystone_service_internaluri_v3: "{{ keystone_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ keystone_service_port }}"
keystone_service_internalurl_v3: "{{ keystone_service_internaluri_v3 }}/v3"
keystone_service_adminuri_v3: "{{ keystone_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ keystone_admin_port }}"
keystone_service_adminurl_v3: "{{ keystone_service_adminuri_v3 }}/v3"
keystone_service_adminurl: "{{ keystone_service_adminurl_v3 }}"
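# Builds the memcache URL argument from all memcached container addresses,
# e.g. "url:10.0.3.21,10.0.3.22:11211" (hosts first, port appended once).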
keystone_cache_backend_argument: "url:{% for host in groups['memcached'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}:{{ memcached_port }}"
keystone_memcached_servers: "{% for host in groups['keystone_all'] %}{{ hostvars[host]['container_address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}"
keystone_service_region: "{{ service_region }}"
keystone_service_adminuri_insecure: false
keystone_service_internaluri_insecure: false
## Horizon
horizon_service_region: "{{ service_region }}"
## Heat
heat_service_region: "{{ service_region }}"
## Cinder
# cinder_backend_rbd_inuse: True if current host has an rbd backend
cinder_backend_rbd_inuse: '{{ (cinder_backends|default("")|to_json).find("cinder.volume.drivers.rbd.RBDDriver") != -1 }}'
# cinder_backends_rbd_inuse: true if at least 1 cinder_backend on any
# cinder_volume host uses Ceph RBD
# http://stackoverflow.com/questions/9486393/jinja2-change-the-value-of-a-variable-inside-a-loop
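# A mutable dict is updated inside the loop because plain Jinja2 variables
# assigned within a for loop do not persist outside of it.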
cinder_backends_rbd_inuse: >
  {% set _var = {'rbd_inuse': False} %}{%
  for host in groups.cinder_volume %}{%
  if hostvars[host].cinder_backend_rbd_inuse | bool %}{%
  if _var.update({'rbd_inuse': True }) %}{%
  endif %}{%
  endif %}{%
  endfor %}{{
  _var.rbd_inuse }}
cinder_ceph_client: cinder
# cinder_backend_lvm_inuse: True if current host has an lvm backend
cinder_backend_lvm_inuse: '{{ (cinder_backends|default("")|to_json).find("cinder.volume.drivers.lvm.LVMVolumeDriver") != -1 }}'
## OpenStack Openrc
openrc_os_auth_url: "{{ keystone_service_internalurl_v3 }}"
openrc_os_password: "{{ keystone_auth_admin_password }}"
openrc_os_domain_name: "Default"
## Tempest Options
tempest_pip_instructions: >
  --isolated
  --pre
  --allow-all-external
  --index-url {{ repo_pip_default_index }}
  --extra-index-url https://pypi.python.org/simple
  --trusted-host pypi.python.org
  --trusted-host {{ openstack_repo_url | netloc_no_port }}
## Swift
swift_system_user_name: swift
swift_system_group_name: swift
swift_system_shell: /bin/bash
swift_system_comment: swift system user
swift_system_home_folder: "/var/lib/{{ swift_system_user_name }}"
swift_service_region: "{{ service_region }}"
## HAProxy
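# True only when more than one HAProxy host is deployed and the internal and
# external VIPs differ, i.e. when HAProxy must bind to addresses that may not
# be assigned locally yet.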
haproxy_bind_on_non_local: "{% if groups.haproxy_hosts[1] is defined and internal_lb_vip_address != external_lb_vip_address %}True{% else %}False{% endif %}"