Support using internal network for clients
Openstack mostly defaults to using public endpoints for internal
communication between services. This patch adds a new option
use-internal-endpoints which, if set to True, will configure services
to use internal endpoints where possible.

Closes-Bug: 1456876
Change-Id: I736a0a281ec434067bc92fa70898b16a027f7422

commit 0bf8de365c (parent da4f012a9b)
@@ -175,6 +175,13 @@ options:
       nagios_context will be used as the servicegroup.
   # Network configuration options
   # NOTE: by default all access is over 'private-address'
+  use-internal-endpoints:
+    default: False
+    type: boolean
+    description: |
+      Openstack mostly defaults to using public endpoints for
+      internal communication between services. If set to True this option will
+      configure services to use internal endpoints where possible.
   network-device-mtu:
     type: int
     default:
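For illustration only (not part of this change): once the option lands in a
charm's config.yaml, an operator would typically flip it at deploy or run
time, e.g. with the current Juju CLI, where <charm-name> stands for whichever
OpenStack API charm has picked up this option:

    juju config <charm-name> use-internal-endpoints=true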
@@ -191,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
 get_netmask_for_address = partial(_get_for_address, key='netmask')
 
 
+def resolve_network_cidr(ip_address):
+    '''
+    Resolves the full address cidr of an ip_address based on
+    configured network interfaces
+    '''
+    netmask = get_netmask_for_address(ip_address)
+    return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
+
+
 def format_ipv6_addr(address):
     """If address is IPv6, wrap it in '[]' otherwise return None.
 
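A quick sketch of what the new helper returns, assuming the unit has an
interface configured as 10.5.0.20 with netmask 255.255.255.0 (illustrative
values only):

    >>> from charmhelpers.contrib.network.ip import resolve_network_cidr
    >>> resolve_network_cidr('10.5.0.20')
    '10.5.0.0/24'

netaddr normalises the address/netmask pair, so the result is the network
CIDR the address lives in.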
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services:
@@ -1479,3 +1479,15 @@ class NetworkServiceContext(OSContextGenerator):
         if self.context_complete(ctxt):
             return ctxt
         return {}
+
+
+class InternalEndpointContext(OSContextGenerator):
+    """Internal endpoint context.
+
+    This context provides the endpoint type used for communication between
+    services e.g. between Nova and Cinder internally. Openstack uses Public
+    endpoints by default so this allows admins to optionally use internal
+    endpoints.
+    """
+    def __call__(self):
+        return {'use_internal_endpoints': config('use-internal-endpoints')}
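A minimal usage sketch (the class sits alongside NetworkServiceContext in
charm-helpers' openstack context module, as the hunk header indicates; the
value shown assumes the charm option is enabled):

    from charmhelpers.contrib.openstack.context import InternalEndpointContext

    ctxt = InternalEndpointContext()()
    # ctxt == {'use_internal_endpoints': True} when use-internal-endpoints
    # is set; templates such as parts/section-cinder (added below) key off
    # this flag.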
@@ -14,16 +14,19 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
 
 from charmhelpers.core.hookenv import (
     config,
     unit_get,
     service_name,
+    network_get_primary_address,
 )
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
     get_ipv6_addr,
+    resolve_network_cidr,
 )
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
 
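A note on the new hookenv import, based on how the call sites below use it:
network_get_primary_address(binding) wraps Juju's network-get hook tool and
raises NotImplementedError on Juju versions without network-space support,
which is why every new caller in this patch guards it with
try/except NotImplementedError.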
@@ -33,16 +36,19 @@ ADMIN = 'admin'
 
 ADDRESS_MAP = {
     PUBLIC: {
+        'binding': 'public',
         'config': 'os-public-network',
         'fallback': 'public-address',
         'override': 'os-public-hostname',
     },
     INTERNAL: {
+        'binding': 'internal',
         'config': 'os-internal-network',
         'fallback': 'private-address',
         'override': 'os-internal-hostname',
     },
     ADMIN: {
+        'binding': 'admin',
         'config': 'os-admin-network',
         'fallback': 'private-address',
         'override': 'os-admin-hostname',
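For context (an aside, not part of the diff): the new 'binding' names are
intended to line up with Juju 2.0 extra-bindings that a consuming charm would
declare in its metadata.yaml, roughly:

    extra-bindings:
      public:
      admin:
      internal: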
@@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC):
     correct network. If clustered with no nets defined, return primary vip.
 
     If not clustered, return unit address ensuring address is on configured net
-    split if one is configured.
+    split if one is configured, or a Juju 2.0 extra-binding has been used.
 
     :param endpoint_type: Network endpoing type
     """
@@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC):
     net_type = ADDRESS_MAP[endpoint_type]['config']
     net_addr = config(net_type)
     net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    binding = ADDRESS_MAP[endpoint_type]['binding']
     clustered = is_clustered()
-    if clustered:
-        if not net_addr:
-            # If no net-splits defined, we expect a single vip
-            resolved_address = vips[0]
-        else:
+
+    if clustered and vips:
+        if net_addr:
             for vip in vips:
                 if is_address_in_network(net_addr, vip):
                     resolved_address = vip
                     break
+        else:
+            # NOTE: endeavour to check vips against network space
+            #       bindings
+            try:
+                bound_cidr = resolve_network_cidr(
+                    network_get_primary_address(binding)
+                )
+                for vip in vips:
+                    if is_address_in_network(bound_cidr, vip):
+                        resolved_address = vip
+                        break
+            except NotImplementedError:
+                # If no net-splits configured and no support for extra
+                # bindings/network spaces so we expect a single vip
+                resolved_address = vips[0]
     else:
         if config('prefer-ipv6'):
             fallback_addr = get_ipv6_addr(exc_list=vips)[0]
         else:
            fallback_addr = unit_get(net_fallback)
 
-        resolved_address = get_address_in_network(net_addr, fallback_addr)
+        if net_addr:
+            resolved_address = get_address_in_network(net_addr, fallback_addr)
+        else:
+            # NOTE: only try to use extra bindings if legacy network
+            #       configuration is not in use
+            try:
+                resolved_address = network_get_primary_address(binding)
+            except NotImplementedError:
+                resolved_address = fallback_addr
 
     if resolved_address is None:
         raise ValueError("Unable to resolve a suitable IP address based on "
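To summarise the new VIP selection logic (my reading of the hunk above): when
clustered with vips set, a legacy os-*-network net-split still wins; failing
that, the code asks Juju for the primary address of the matching extra-binding,
turns it into a CIDR with resolve_network_cidr(), and picks the VIP inside that
CIDR; on Juju versions without bindings (NotImplementedError) it falls back to
the single-VIP behaviour. The non-clustered path mirrors this: net-split config
first, then the binding's primary address, then the plain unit address.

    # Illustrative sketch only (requires hook context): resolving the internal
    # endpoint on a clustered unit with vip='10.10.5.100 10.20.5.100' and an
    # 'internal' binding whose primary address sits in 10.20.5.0/24, with no
    # os-internal-network set, should yield '10.20.5.100'.
    from charmhelpers.contrib.openstack.ip import resolve_address, INTERNAL

    addr = resolve_address(endpoint_type=INTERNAL)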
@@ -181,7 +181,8 @@ BASE_RESOURCE_MAP = {
                      MetadataServiceContext(),
                      HostIPContext(),
                      DesignateContext(),
-                     context.LogLevelContext()],
+                     context.LogLevelContext(),
+                     context.InternalEndpointContext()],
     },
 }
 
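With InternalEndpointContext registered against nova.conf here, the
use_internal_endpoints flag becomes available to the templates below, which is
what the new parts/section-cinder include consumes.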
@@ -165,5 +165,5 @@ live_migration_uri = {{ live_migration_uri }}
 disk_cachemodes = {{ disk_cachemodes }}
 {% endif %}
 
-{% include "parts/cinder" %}
+{% include "parts/section-cinder" %}
 
@@ -177,7 +177,7 @@ disk_cachemodes = {{ disk_cachemodes }}
 
 {% include "section-rabbitmq-oslo" %}
 
-{% include "parts/cinder" %}
+{% include "parts/section-cinder" %}
 
 [oslo_concurrency]
 lock_path=/var/lock/nova
@@ -183,6 +183,8 @@ disk_cachemodes = {{ disk_cachemodes }}
 
 {% include "section-rabbitmq-oslo" %}
 
+{% include "parts/section-cinder" %}
+
 [oslo_concurrency]
 lock_path=/var/lock/nova
 
@@ -182,7 +182,7 @@ disk_cachemodes = {{ disk_cachemodes }}
 
 {% include "section-rabbitmq-oslo" %}
 
-{% include "parts/cinder" %}
+{% include "parts/section-cinder" %}
 
 [oslo_concurrency]
 lock_path=/var/lock/nova
templates/parts/cinder (deleted file)
@@ -1,5 +0,0 @@
-{% if volume_service and volume_service == 'cinder' and region -%}
-[cinder]
-os_region_name = {{ region }}
-{% endif -%}
-
templates/parts/section-cinder (new file, 9 lines)
@@ -0,0 +1,9 @@
+{% if volume_service and volume_service == 'cinder' -%}
+[cinder]
+{% if use_internal_endpoints -%}
+catalog_info = volumev2:cinderv2:internalURL
+{% endif %}
+{% if region -%}
+os_region_name = {{ region }}
+{% endif %}
+{% endif -%}
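For a rough idea of the rendered output (the region name is an example value
only): with use-internal-endpoints set to True and a region configured, the
include above produces approximately:

    [cinder]
    catalog_info = volumev2:cinderv2:internalURL
    os_region_name = RegionOne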
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services: