Use FQDN when registering agents with Nova
The change of behaviour will only affect newly installed deployments on
OpenStack Train and onwards.

Also set an upper constraint for ``python-cinderclient`` in the
functional test requirements, as the tests rely on the v1 client which
has been removed. We will not fix this in Amulet; the charm is pending
migration to the Zaza framework.

Change-Id: Ia73ed6b76fc7f18014d4fa913397cc069e51ff07
Depends-On: Iee73164358745628a4b8658614608bc872771fd1
Closes-Bug: #1839300
parent 82c5027814
commit c6455cc955
@@ -13,10 +13,11 @@
 # limitations under the License.
 
 import json
-import uuid
 import os
 import platform
 import shutil
 import socket
+import uuid
 
+from charmhelpers.core.unitdata import kv
 from charmhelpers.contrib.openstack import context
@@ -777,6 +778,23 @@ class HostIPContext(context.OSContextGenerator):
             # NOTE: do not format this even for ipv6 (see bug 1499656)
             ctxt['host_ip'] = host_ip
 
+        # the contents of the Nova ``host`` configuration option are
+        # referenced throughout an OpenStack deployment, an example being
+        # Neutron port bindings. Its value should not change after an
+        # individual unit's initial deployment.
+        #
+        # We do want to migrate to using FQDNs, so we enable this for new
+        # installations.
+        db = kv()
+        if db.get('install_version', 0) >= 1910:
+            fqdn = socket.getfqdn(host_ip)
+            if '.' in fqdn:
+                # only populate the value if getfqdn() is able to find an
+                # actual FQDN for this host. If not, we revert back to
+                # not setting the configuration option and use Nova's
+                # default behaviour.
+                ctxt['host'] = fqdn
+
         return ctxt
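The `'.' in fqdn` guard above hinges on a detail of the standard library: socket.getfqdn() falls back to the bare hostname when neither DNS nor /etc/hosts yields a domain for the address, so the presence of a dot is what distinguishes a usable FQDN from a short name. A minimal standalone sketch of that logic (the helper name nova_host_setting is illustrative, not part of the charm):

    import socket

    def nova_host_setting(host_ip):
        # Illustrative helper mirroring the guard in HostIPContext above:
        # getfqdn() returns the bare hostname when no domain is resolvable,
        # in which case Nova's ``host`` option is left unset so Nova keeps
        # its default behaviour.
        fqdn = socket.getfqdn(host_ip)
        return fqdn if '.' in fqdn else None

For example, on a host with working name resolution this returns something like 'compute-0.example.com', while a host that only knows its short name yields None and the option stays unset.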
@@ -156,6 +156,10 @@ def install():
     apt_update()
     apt_install(determine_packages(), fatal=True)
 
+    db = kv()
+    db.set('install_version', 1910)
+    db.flush()
+
 
 @hooks.hook('config-changed')
 @restart_on_change(restart_map())
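Read together with the context change above, this stamp is what limits the new behaviour to fresh installs: only install() writes the key, so upgraded units never have it. A condensed restatement of the gate (assuming only the charmhelpers kv() semantics used in the diff):

    from charmhelpers.core.unitdata import kv

    db = kv()
    # install() stamps 1910 at deploy time, so the key exists only on units
    # first installed with this change; units upgraded from an older charm
    # revision fall through to the get() default of 0 and keep the
    # pre-existing (non-FQDN) ``host`` behaviour.
    if db.get('install_version', 0) >= 1910:
        pass  # new install: may register with Nova using the FQDN
    else:
        pass  # upgraded unit: leave Nova's ``host`` option unset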
templates/stein/nova.conf (new file, 310 lines)
@@ -0,0 +1,310 @@
# stein
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
{% if restart_trigger -%}
# restart trigger: {{ restart_trigger }}
{% endif -%}
###############################################################################
[DEFAULT]
verbose={{ verbose }}
debug={{ debug }}
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
force_dhcp_release=True
use_syslog = {{ use_syslog }}
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
enabled_apis=osapi_compute,metadata
auth_strategy=keystone
my_ip = {{ host_ip }}
force_raw_images = {{ force_raw_images }}

{% if host -%}
host = {{ host }}
{% endif -%}

{% if debug -%}
default_log_levels = "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, oslo_messaging=DEBUG, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO, dogpile.core.dogpile=INFO, glanceclient=WARN, oslo.privsep.daemon=INFO"
{% endif -%}

{% if transport_url %}
transport_url = {{ transport_url }}
{% endif %}

{% if arch == 'aarch64' -%}
libvirt_use_virtio_for_bridges=False
libvirt_disk_prefix=vd
{% endif -%}

{% if console_vnc_type -%}
vnc_enabled = True
novnc_enabled = True
vnc_keymap = {{ console_keymap }}
vncserver_listen = {{ console_listen_addr }}
vncserver_proxyclient_address = {{ console_listen_addr }}
{% if console_access_protocol == 'novnc' or console_access_protocol == 'vnc' -%}
novncproxy_base_url = {{ novnc_proxy_address }}
{% endif -%}
{% if console_access_protocol == 'xvpvnc' or console_access_protocol == 'vnc' -%}
xvpvncproxy_port = {{ xvpvnc_proxy_port }}
xvpvncproxy_host = {{ xvpvnc_proxy_host }}
xvpvncproxy_base_url = {{ xvpvnc_proxy_address }}
{% endif -%}
{% else -%}
vnc_enabled = False
novnc_enabled = False
{% endif -%}

{% if neutron_plugin and neutron_plugin in ('ovs', 'midonet') -%}
libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver
{% if neutron_security_groups -%}
security_group_api = neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% endif -%}

{% if neutron_plugin and neutron_plugin == 'vsp' -%}
network_api_class=nova.network.neutronv2.api.API
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
neutron_ovs_bridge=alubr0
security_group_api=neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}

{% if neutron_plugin and (neutron_plugin == 'nvp' or neutron_plugin == 'nsx') -%}
libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtOpenVswitchVirtualPortDriver
security_group_api = neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}

{% if neutron_plugin and neutron_plugin == 'Calico' -%}
security_group_api = neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}

{% if neutron_plugin and neutron_plugin == 'plumgrid' -%}
security_group_api=neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}

{% if network_manager != 'neutron' and network_manager_config -%}
{% for key, value in network_manager_config.items() -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}

{% if network_manager == 'neutron' -%}
network_api_class = nova.network.neutronv2.api.API
use_neutron = True
{% else -%}
network_manager = nova.network.manager.FlatDHCPManager
{% endif -%}

{% if network_device_mtu -%}
network_device_mtu = {{ network_device_mtu }}
{% endif -%}

{% if volume_service -%}
volume_api_class = nova.volume.cinder.API
{% endif -%}

{% if user_config_flags -%}
{% for key, value in user_config_flags.items() -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}

{% if instances_path -%}
instances_path = {{ instances_path }}
{% endif -%}

{% if sections and 'DEFAULT' in sections -%}
{% for key, value in sections['DEFAULT'] -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}

{% if vcpu_pin_set -%}
vcpu_pin_set = {{ vcpu_pin_set }}
{% endif -%}
reserved_host_memory_mb = {{ reserved_host_memory }}

{% if reserved_huge_pages -%}
{% for value in reserved_huge_pages -%}
reserved_huge_pages = {{ value }}
{% endfor -%}
{% endif -%}

{% include "section-zeromq" %}

{% if default_availability_zone -%}
default_availability_zone = {{ default_availability_zone }}
{% endif -%}

{% if resume_guests_state_on_host_boot -%}
resume_guests_state_on_host_boot = {{ resume_guests_state_on_host_boot }}
{% endif -%}

metadata_workers = {{ workers }}

[pci]
{% if pci_passthrough_whitelist -%}
passthrough_whitelist = {{ pci_passthrough_whitelist }}
{% endif -%}
{% if pci_alias %}
alias = {{ pci_alias }}
{% endif %}

{% if network_manager == 'neutron' and network_manager_config -%}
[neutron]
url = {{ network_manager_config.neutron_url }}
{% if network_manager_config.keystone_host or auth_host -%}
{% if neutron_plugin and neutron_plugin == 'vsp' -%}
ovs_bridge = alubr0
{% endif -%}
{% if auth_host -%}
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
auth_type = password
{% if admin_domain_name -%}
project_domain_name = {{ admin_domain_name }}
user_domain_name = {{ admin_domain_name }}
{% else -%}
project_domain_name = default
user_domain_name = default
{% endif -%}
project_name = {{ admin_tenant_name }}
username = {{ admin_user }}
password = {{ admin_password }}
signing_dir = {{ signing_dir }}
{% endif -%}
{% if metadata_shared_secret -%}
metadata_proxy_shared_secret = {{ metadata_shared_secret }}
service_metadata_proxy=True
{% endif -%}
{% endif -%}
{% endif -%}

{% include "section-keystone-authtoken-mitaka" %}

{% if glance_api_servers -%}
[glance]
api_servers = {{ glance_api_servers }}
{% endif -%}

{% if vendor_data or vendor_data_url -%}
[api]
vendordata_providers = {{ vendordata_providers }}
{% if vendor_data -%}
vendordata_jsonfile_path = /etc/nova/vendor_data.json
{% endif -%}
{% if vendor_data_url -%}
vendordata_dynamic_targets = {{ vendor_data_url }}
{% endif -%}
{% endif -%}

{% if console_access_protocol == 'spice' -%}
[spice]
agent_enabled = True
enabled = True
html5proxy_base_url = {{ spice_proxy_address }}
keymap = {{ console_keymap }}
server_listen = {{ console_listen_addr }}
server_proxyclient_address = {{ console_listen_addr }}
{% endif -%}

[libvirt]
{% if cpu_mode -%}
cpu_mode = {{ cpu_mode }}
{% endif -%}
{% if cpu_model -%}
cpu_model = {{ cpu_model }}
{% endif -%}
{% if cpu_model_extra_flags %}
cpu_model_extra_flags = {{ cpu_model_extra_flags }}
{% endif %}
{% if libvirt_images_type -%}
images_type = {{ libvirt_images_type }}
{% endif -%}
{% if libvirt_images_type and rbd_pool -%}
images_rbd_pool = {{ rbd_pool }}
images_rbd_ceph_conf = {{ libvirt_rbd_images_ceph_conf }}
inject_password = false
inject_key = false
inject_partition = -2
{% endif -%}
rbd_user = {{ rbd_user }}
rbd_secret_uuid = {{ rbd_secret_uuid }}
{% if live_migration_uri -%}
live_migration_uri = {{ live_migration_uri }}
{% endif -%}
{% if live_migration_permit_post_copy -%}
live_migration_permit_post_copy = {{ live_migration_permit_post_copy }}
{% endif -%}
{% if live_migration_permit_auto_converge -%}
live_migration_permit_auto_converge = {{ live_migration_permit_auto_converge }}
{% endif -%}
{% if disk_cachemodes -%}
disk_cachemodes = {{ disk_cachemodes }}
{% endif %}
# Disable tunnelled migration so that selective
# live block migration can be supported.
live_migration_tunnelled = False
{% if use_multipath -%}
volume_use_multipath = {{ use_multipath }}
{% endif %}
{% if default_ephemeral_format -%}
default_ephemeral_format = {{ default_ephemeral_format }}
{% endif %}
hw_disk_discard = unmap
{% if virtio_net_tx_queue_size -%}
tx_queue_size = {{ virtio_net_tx_queue_size }}
{% endif %}
{% if virtio_net_rx_queue_size -%}
rx_queue_size = {{ virtio_net_rx_queue_size }}
{% endif %}

{% if virt_type == 'lxd' -%}
[lxd]
{% if enable_live_migration -%}
allow_live_migration = True
{% endif -%}
{% if storage_pool -%}
pool = {{ storage_pool }}
{% endif -%}
{% endif -%}

{% include "parts/section-database" %}

{% include "section-oslo-messaging-rabbit" %}

[notifications]
# Starting in the Pike release, the notification_format includes both the
# versioned and unversioned message notifications. Ceilometer does not yet
# consume the versioned message notifications, so intentionally make the
# notification format unversioned until this is implemented.
notification_format = unversioned

{% include "section-oslo-notifications" %}

{% include "parts/section-cinder" %}

[oslo_concurrency]
lock_path=/var/lock/nova

[workarounds]
disable_libvirt_livesnapshot = False

{% include "parts/section-ephemeral" %}

{% include "parts/section-serial-console" %}

{% include "parts/section-placement" %}

[compute]
{% if cpu_shared_set -%}
cpu_shared_set = {{ cpu_shared_set }}
{% endif -%}
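The `{% if host -%}` stanza near the top of this template is the rendering half of the context change above: the option is emitted only when HostIPContext supplied a value. A quick standalone Jinja2 check (illustrative; only the template block itself comes from the file above, the host names are made up):

    from jinja2 import Template

    snippet = Template(
        "my_ip = {{ host_ip }}\n"
        "{% if host -%}\n"
        "host = {{ host }}\n"
        "{% endif -%}\n"
    )

    # No 'host' in the context: the option is omitted and Nova derives
    # its default host name itself.
    print(snippet.render(host_ip='172.24.0.79'))

    # FQDN found: the option is pinned explicitly in nova.conf.
    print(snippet.render(host_ip='172.24.0.79', host='compute-0.example.com'))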
@@ -12,7 +12,7 @@ requests>=2.18.4
 amulet>=1.14.3,<2.0;python_version=='2.7'
 bundletester>=0.6.1,<1.0;python_version=='2.7'
 python-ceilometerclient>=1.5.0
-python-cinderclient>=1.4.0
+python-cinderclient>=1.4.0,<5.0.0
 python-glanceclient>=1.1.0
 python-heatclient>=0.8.0
 python-keystoneclient>=1.7.1
@@ -64,7 +64,7 @@ class FakeUnitdata(object):
         self.unit_data[name] = value
 
     def get(self, key, default=None, record=False):
-        return self.unit_data.get(key)
+        return self.unit_data.get(key, default)
 
     def set(self, key, value):
         self.unit_data[key] = value
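This one-argument fix matters because the new context code calls db.get('install_version', 0): a fake that drops the default returns None instead of 0, and on Python 3 `None >= 1910` raises TypeError rather than evaluating to False. A minimal illustration of the difference:

    unit_data = {}  # FakeUnitdata backing store on an 'upgraded' unit

    before = unit_data.get('install_version')     # None: default ignored
    after = unit_data.get('install_version', 0)   # 0: default honoured

    # HostIPContext does ``db.get('install_version', 0) >= 1910``; with
    # the old fake that was ``None >= 1910`` (TypeError on Python 3),
    # with the fix it is ``0 >= 1910`` -> False, i.e. the legacy path.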
@@ -597,20 +597,31 @@ class NovaComputeContextTests(CharmTestCase):
         libvirt = context.NovaComputeLibvirtContext()
         self.assertFalse('cpu-mode' in libvirt())
 
+    @patch.object(context.socket, 'getfqdn')
     @patch('subprocess.call')
-    def test_host_IP_context(self, _call):
+    def test_host_IP_context(self, _call, _getfqdn):
         self.log = fake_log
         self.get_relation_ip.return_value = '172.24.0.79'
+        self.kv.return_value = FakeUnitdata()
         host_ip = context.HostIPContext()
         self.assertEqual({'host_ip': '172.24.0.79'}, host_ip())
         self.get_relation_ip.assert_called_with('cloud-compute',
                                                 cidr_network=None)
+        self.kv.return_value = FakeUnitdata(install_version=1910)
+        _getfqdn.return_value = 'some'
+        host_ip = context.HostIPContext()
+        self.assertEqual({'host_ip': '172.24.0.79'}, host_ip())
+        _getfqdn.return_value = 'some.hostname'
+        host_ip = context.HostIPContext()
+        self.assertDictEqual({'host': 'some.hostname',
+                              'host_ip': '172.24.0.79'}, host_ip())
 
     @patch('subprocess.call')
     def test_host_IP_context_ipv6(self, _call):
         self.log = fake_log
         self.test_config.set('prefer-ipv6', True)
         self.get_relation_ip.return_value = '2001:db8:0:1::2'
+        self.kv.return_value = FakeUnitdata()
         host_ip = context.HostIPContext()
         self.assertEqual({'host_ip': '2001:db8:0:1::2'}, host_ip())
         self.assertTrue(self.get_relation_ip.called)
Loading…
x
Reference in New Issue
Block a user