From 0bf8de365c402e319ef1e2a5a20f9cfbe325625d Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Thu, 24 Mar 2016 16:00:51 +0000
Subject: [PATCH] Support using internal network for clients

OpenStack mostly defaults to using public endpoints for internal
communication between services. This patch adds a new option
use-internal-endpoints which, if set to True, will configure services
to use internal endpoints where possible.

Closes-Bug: 1456876
Change-Id: I736a0a281ec434067bc92fa70898b16a027f7422
---
 config.yaml                                    |  7 ++++
 hooks/charmhelpers/contrib/network/ip.py       |  9 ++++
 .../contrib/openstack/amulet/deployment.py     |  4 +-
 .../charmhelpers/contrib/openstack/context.py  | 12 ++++++
 hooks/charmhelpers/contrib/openstack/ip.py     | 42 +++++++++++++++----
 hooks/nova_compute_utils.py                    |  3 +-
 templates/juno/nova.conf                       |  2 +-
 templates/kilo/nova.conf                       |  2 +-
 templates/liberty/nova.conf                    |  2 +
 templates/mitaka/nova.conf                     |  2 +-
 templates/parts/cinder                         |  5 ---
 templates/parts/section-cinder                 |  9 ++++
 .../contrib/openstack/amulet/deployment.py     |  4 +-
 13 files changed, 85 insertions(+), 18 deletions(-)
 delete mode 100644 templates/parts/cinder
 create mode 100644 templates/parts/section-cinder

diff --git a/config.yaml b/config.yaml
index 830c514d..7db31cc6 100644
--- a/config.yaml
+++ b/config.yaml
@@ -175,6 +175,13 @@ options:
       nagios_context will be used as the servicegroup.
   # Network configuration options
   # NOTE: by default all access is over 'private-address'
+  use-internal-endpoints:
+    default: False
+    type: boolean
+    description: |
+      Openstack mostly defaults to using public endpoints for
+      internal communication between services. If set to True this option will
+      configure services to use internal endpoints where possible.
   network-device-mtu:
     type: int
     default:
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
index 4efe7993..b9c79000 100644
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ b/hooks/charmhelpers/contrib/network/ip.py
@@ -191,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
 get_netmask_for_address = partial(_get_for_address, key='netmask')
 
 
+def resolve_network_cidr(ip_address):
+    '''
+    Resolves the full address cidr of an ip_address based on
+    configured network interfaces
+    '''
+    netmask = get_netmask_for_address(ip_address)
+    return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
+
+
 def format_ipv6_addr(address):
     """If address is IPv6, wrap it in '[]' otherwise return None.
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index d2ede320..d21c9c78 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services:
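The new resolve_network_cidr() helper added above in hooks/charmhelpers/contrib/network/ip.py underpins the network-space logic later in this patch: it looks up the netmask of the interface carrying a unit address and normalises the pair to the network's CIDR via netaddr. A minimal sketch of the same computation, with the netmask passed in directly (no live interface lookup) and example values assumed:

```python
import netaddr


def resolve_network_cidr_demo(ip_address, netmask):
    # Same computation as the helper above, but the netmask is supplied
    # directly instead of being resolved from the unit's interfaces.
    return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)


# A unit address of 10.5.0.4 with a 255.255.255.0 netmask normalises to:
print(resolve_network_cidr_demo('10.5.0.4', '255.255.255.0'))  # 10.5.0.0/24
```

Operators opt in to the new behaviour via charm config, e.g. `juju set nova-compute use-internal-endpoints=True` under Juju 1.x.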
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index a8c6ab0c..d495da3f 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -1479,3 +1479,15 @@ class NetworkServiceContext(OSContextGenerator):
         if self.context_complete(ctxt):
             return ctxt
         return {}
+
+
+class InternalEndpointContext(OSContextGenerator):
+    """Internal endpoint context.
+
+    This context provides the endpoint type used for communication between
+    services e.g. between Nova and Cinder internally. Openstack uses Public
+    endpoints by default so this allows admins to optionally use internal
+    endpoints.
+    """
+    def __call__(self):
+        return {'use_internal_endpoints': config('use-internal-endpoints')}
diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py
index 3dca6dc1..532a1dc1 100644
--- a/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/hooks/charmhelpers/contrib/openstack/ip.py
@@ -14,16 +14,19 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 
+
 from charmhelpers.core.hookenv import (
     config,
     unit_get,
     service_name,
+    network_get_primary_address,
 )
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
     get_ipv6_addr,
+    resolve_network_cidr,
 )
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
@@ -33,16 +36,19 @@ ADMIN = 'admin'
 
 ADDRESS_MAP = {
     PUBLIC: {
+        'binding': 'public',
         'config': 'os-public-network',
         'fallback': 'public-address',
         'override': 'os-public-hostname',
     },
     INTERNAL: {
+        'binding': 'internal',
         'config': 'os-internal-network',
         'fallback': 'private-address',
         'override': 'os-internal-hostname',
     },
     ADMIN: {
+        'binding': 'admin',
         'config': 'os-admin-network',
         'fallback': 'private-address',
         'override': 'os-admin-hostname',
@@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC):
     correct network. If clustered with no nets defined, return primary vip.
 
     If not clustered, return unit address ensuring address is on configured net
-    split if one is configured.
+    split if one is configured, or a Juju 2.0 extra-binding has been used.
 
     :param endpoint_type: Network endpoing type
     """
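InternalEndpointContext is deliberately thin: it surfaces the charm option as a single flag in the template namespace, which the section-cinder fragment added later in this patch consumes. A standalone sketch of the pattern, with hookenv's config() stubbed to an assumed value since there is no Juju environment to read outside a hook:

```python
def config(key):
    # Stub for charmhelpers.core.hookenv.config(); value is assumed.
    return {'use-internal-endpoints': True}[key]


class InternalEndpointContext(object):
    """Mirror of the context generator above, minus the base class."""
    def __call__(self):
        return {'use_internal_endpoints': config('use-internal-endpoints')}


# The template renderer merges each context's dict into the namespace
# handed to the Jinja2 templates:
print(InternalEndpointContext()())  # {'use_internal_endpoints': True}
```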
@@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC):
     net_type = ADDRESS_MAP[endpoint_type]['config']
     net_addr = config(net_type)
     net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    binding = ADDRESS_MAP[endpoint_type]['binding']
     clustered = is_clustered()
-    if clustered:
-        if not net_addr:
-            # If no net-splits defined, we expect a single vip
-            resolved_address = vips[0]
-        else:
+
+    if clustered and vips:
+        if net_addr:
             for vip in vips:
                 if is_address_in_network(net_addr, vip):
                     resolved_address = vip
                     break
+        else:
+            # NOTE: endeavour to check vips against network space
+            #       bindings
+            try:
+                bound_cidr = resolve_network_cidr(
+                    network_get_primary_address(binding)
+                )
+                for vip in vips:
+                    if is_address_in_network(bound_cidr, vip):
+                        resolved_address = vip
+                        break
+            except NotImplementedError:
+                # If no net-splits configured and no support for extra
+                # bindings/network spaces so we expect a single vip
+                resolved_address = vips[0]
     else:
         if config('prefer-ipv6'):
             fallback_addr = get_ipv6_addr(exc_list=vips)[0]
         else:
             fallback_addr = unit_get(net_fallback)
-        resolved_address = get_address_in_network(net_addr, fallback_addr)
+
+        if net_addr:
+            resolved_address = get_address_in_network(net_addr, fallback_addr)
+        else:
+            # NOTE: only try to use extra bindings if legacy network
+            #       configuration is not in use
+            try:
+                resolved_address = network_get_primary_address(binding)
+            except NotImplementedError:
+                resolved_address = fallback_addr
 
     if resolved_address is None:
         raise ValueError("Unable to resolve a suitable IP address based on "
diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py
index 4046d8e7..cb4ff755 100644
--- a/hooks/nova_compute_utils.py
+++ b/hooks/nova_compute_utils.py
@@ -181,7 +181,8 @@ BASE_RESOURCE_MAP = {
                      MetadataServiceContext(),
                      HostIPContext(),
                      DesignateContext(),
-                     context.LogLevelContext()],
+                     context.LogLevelContext(),
+                     context.InternalEndpointContext()],
     },
 }
diff --git a/templates/juno/nova.conf b/templates/juno/nova.conf
index 773e90eb..1a5e2db8 100644
--- a/templates/juno/nova.conf
+++ b/templates/juno/nova.conf
@@ -165,5 +165,5 @@ live_migration_uri = {{ live_migration_uri }}
 disk_cachemodes = {{ disk_cachemodes }}
 {% endif %}
 
-{% include "parts/cinder" %}
+{% include "parts/section-cinder" %}
 
diff --git a/templates/kilo/nova.conf b/templates/kilo/nova.conf
index c47e99bb..37ca88f6 100644
--- a/templates/kilo/nova.conf
+++ b/templates/kilo/nova.conf
@@ -177,7 +177,7 @@ disk_cachemodes = {{ disk_cachemodes }}
 
 {% include "section-rabbitmq-oslo" %}
 
-{% include "parts/cinder" %}
+{% include "parts/section-cinder" %}
 
 [oslo_concurrency]
 lock_path=/var/lock/nova
diff --git a/templates/liberty/nova.conf b/templates/liberty/nova.conf
index f7681e73..e14cb792 100644
--- a/templates/liberty/nova.conf
+++ b/templates/liberty/nova.conf
@@ -183,6 +183,8 @@ disk_cachemodes = {{ disk_cachemodes }}
 
 {% include "section-rabbitmq-oslo" %}
 
+{% include "parts/section-cinder" %}
+
 [oslo_concurrency]
 lock_path=/var/lock/nova
diff --git a/templates/mitaka/nova.conf b/templates/mitaka/nova.conf
index 93ad9f03..a44d4b3f 100644
--- a/templates/mitaka/nova.conf
+++ b/templates/mitaka/nova.conf
@@ -182,7 +182,7 @@ disk_cachemodes = {{ disk_cachemodes }}
 
 {% include "section-rabbitmq-oslo" %}
 
-{% include "parts/cinder" %}
+{% include "parts/section-cinder" %}
 
 [oslo_concurrency]
 lock_path=/var/lock/nova
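The clustered branch above is the interesting part: when no os-*-network config is set, the charm asks Juju (network_get_primary_address) for the address bound to the endpoint's network space, derives its CIDR with resolve_network_cidr(), and picks the VIP that falls inside it. A sketch of that selection with assumed addresses; pick_vip is a name invented here, and netaddr stands in for charmhelpers' is_address_in_network:

```python
import netaddr


def pick_vip(vips, bound_address, bound_netmask):
    # Derive the CIDR of the network the binding resolved to, then
    # return the first VIP inside that network (None if there is none).
    bound_cidr = netaddr.IPNetwork(
        "%s/%s" % (bound_address, bound_netmask)).cidr
    for vip in vips:
        if netaddr.IPAddress(vip) in bound_cidr:
            return vip
    return None


# The internal binding resolves to 10.10.0.5/24, so the VIP on that
# network wins over the public one:
print(pick_vip(['192.168.1.100', '10.10.0.100'],
               '10.10.0.5', '255.255.255.0'))  # 10.10.0.100
```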
diff --git a/templates/parts/cinder b/templates/parts/cinder
deleted file mode 100644
index d06d5e60..00000000
--- a/templates/parts/cinder
+++ /dev/null
@@ -1,5 +0,0 @@
-{% if volume_service and volume_service == 'cinder' and region -%}
-[cinder]
-os_region_name = {{ region }}
-{% endif -%}
-
diff --git a/templates/parts/section-cinder b/templates/parts/section-cinder
new file mode 100644
index 00000000..aa91c4c1
--- /dev/null
+++ b/templates/parts/section-cinder
@@ -0,0 +1,9 @@
+{% if volume_service and volume_service == 'cinder' -%}
+[cinder]
+{% if use_internal_endpoints -%}
+catalog_info = volumev2:cinderv2:internalURL
+{% endif %}
+{% if region -%}
+os_region_name = {{ region }}
+{% endif %}
+{% endif -%}
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index d2ede320..d21c9c78 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services:
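For reference, catalog_info uses nova's service_type:service_name:endpoint_type format, steering nova's cinder client to the internal endpoint in the keystone catalog instead of the public default. A sketch of what the new fragment renders to, using plain Jinja2 with assumed context values (the real charm renders it through charmhelpers' template machinery; the constant name here is invented, and the template body is copied from the patch above):

```python
from jinja2 import Template

# Template body copied from templates/parts/section-cinder above.
SECTION_CINDER = """\
{% if volume_service and volume_service == 'cinder' -%}
[cinder]
{% if use_internal_endpoints -%}
catalog_info = volumev2:cinderv2:internalURL
{% endif %}
{% if region -%}
os_region_name = {{ region }}
{% endif %}
{% endif -%}
"""

# Assumed context values for illustration:
ctxt = {'volume_service': 'cinder',
        'use_internal_endpoints': True,
        'region': 'RegionOne'}

# Prints (modulo blank lines):
#   [cinder]
#   catalog_info = volumev2:cinderv2:internalURL
#   os_region_name = RegionOne
print(Template(SECTION_CINDER).render(**ctxt))
```

With use-internal-endpoints left at its default of False, only the os_region_name line survives, matching the behaviour of the old parts/cinder fragment.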