Migrate to hacluster interface for VIPs

Chris MacNaughton 2022-03-21 12:27:20 +01:00
parent dcb58955c8
commit ac1048ed1c
10 changed files with 76 additions and 76 deletions

View File

@@ -34,10 +34,6 @@ grant-access:
       description: IP address or network to change access for
       type: string
       default:
-    mode:
-      description: Access mode to grant
-      type: string
-      default: "RW"
 revoke-access:
   description: |

View File

@@ -62,5 +62,7 @@ options:
     type: string
     default:
     description: |
-      VIP to associate with this service. This VIP will only be functional
-      with a relation to the hacluster charm.
+      Virtual IP(s) to use to front API services in HA configuration.
+      .
+      If multiple networks are being used, a VIP should be provided for each
+      network, separated by spaces.
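
A usage example for the new option (application name taken from the test bundles below; addresses hypothetical):

    juju config ceph-nfs vip='10.5.100.5 10.6.100.5'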

View File

@@ -19,8 +19,9 @@ extra-bindings:
 requires:
   ceph-client:
     interface: ceph-client
-  loadbalancer:
-    interface: openstack-loadbalancer
+  ha:
+    interface: hacluster
+    scope: container
 peers:
   cluster:
     interface: ceph-nfs-peer
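
With the loadbalancer endpoint gone, HA now comes from relating the hacluster subordinate directly to this container-scoped endpoint, as the test bundles below do; interactively that would look something like:

    juju deploy hacluster --channel 2.0.3/edge
    juju add-relation ceph-nfs:ha hacluster:ha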

View File

@@ -3,4 +3,4 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
 git+https://github.com/canonical/operator.git#egg=ops
 git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client
 git+https://github.com/ChrisMacNaughton/charm-ops-openstack.git@feature/charm-instance-to-relation-adapter#egg=ops_openstack
-git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer
+git+https://opendev.org/openstack/charm-interface-hacluster#egg=interface_hacluster

View File

@@ -12,6 +12,7 @@ develop a new k8s charm using the Operator Framework:
     https://discourse.charmhub.io/t/4208
 """
 
+import ipaddress
 import logging
 import os
 from pathlib import Path
@@ -28,7 +29,7 @@ import charmhelpers.core.templating as ch_templating
 import interface_ceph_client.ceph_client as ceph_client
 import interface_ceph_nfs_peer
-import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface
+import interface_hacluster.ops_ha_interface as ops_ha_interface
 
 # TODO: Add the below class functionality to action / relations
 from ganesha import GaneshaNfs
@@ -159,9 +160,8 @@ class CephNfsCharm(
         self.peers = interface_ceph_nfs_peer.CephNfsPeers(
             self,
             'cluster')
-        self.ingress = ops_lb_interface.OSLoadbalancerRequires(
-            self,
-            'loadbalancer')
+        self.ha = ops_ha_interface.HAServiceRequires(self, 'ha')
         self.adapters = CephNFSAdapters(
             (self.ceph_client, self.peers),
             contexts=(CephNFSContext(self),),
@@ -191,11 +191,8 @@ class CephNfsCharm(
             self.peers.on.reload_nonce,
             self.on_reload_nonce)
         self.framework.observe(
-            self.ingress.on.lb_relation_ready,
-            self._request_loadbalancer)
-        self.framework.observe(
-            self.ingress.on.lb_configured,
-            self.render_config)
+            self.ha.on.ha_ready,
+            self._configure_hacluster)
         # Actions
         self.framework.observe(
             self.on.create_share_action,
@@ -216,15 +213,6 @@ class CephNfsCharm(
             self.revoke_access_action
         )
 
-    def _request_loadbalancer(self, _) -> None:
-        """Send request to create loadbalancer"""
-        self.ingress.request_loadbalancer(
-            self.LB_SERVICE_NAME,
-            self.NFS_PORT,
-            self.NFS_PORT,
-            self._get_bind_ip(),
-            'tcp')
-
     def _get_bind_ip(self) -> str:
         """Return the IP to bind the NFS server to"""
         binding = self.model.get_binding('public')
@@ -361,6 +349,18 @@ class CephNfsCharm(
             logging.error("Failed to setup ganesha index object")
             event.defer()
 
+    def _configure_hacluster(self, _):
+        vip_config = self.config.get('vip')
+        if not vip_config:
+            logging.warning("Cannot setup vips, vip config missing")
+            return
+        for vip in vip_config.split():
+            self.ha.add_vip('vip', vip)
+        self.ha.add_systemd_service('ganesha-systemd', 'nfs-ganesha')
+        self.ha.add_colocation(
+            self.model.app.name, 'ALWAYS', ['ganesha-vip', 'ganesha-systemd'])
+        self.ha.bind_resources()
+
     def on_pool_initialised(self, event):
         try:
             logging.debug("Restarting Ganesha after pool initialisation")
@@ -373,19 +373,34 @@ class CephNfsCharm(
             logging.info("Reloading Ganesha after nonce triggered reload")
             subprocess.call(['killall', '-HUP', 'ganesha.nfsd'])
 
+    def _get_binding_subnet_map(self):
+        bindings = {}
+        for binding_name in self.meta.extra_bindings.keys():
+            network = self.model.get_binding(binding_name).network
+            bindings[binding_name] = [i.subnet for i in network.interfaces]
+        return bindings
+
+    @property
+    def vips(self):
+        return self.config.get('vip').split()
+
+    def _get_space_vip_mapping(self):
+        bindings = {}
+        for binding_name, subnets in self._get_binding_subnet_map().items():
+            bindings[binding_name] = [
+                vip
+                for subnet in subnets
+                for vip in self.vips
+                if ipaddress.ip_address(vip) in subnet]
+        return bindings
+
     def access_address(self) -> str:
         """Return the IP to advertise Ganesha on"""
         binding = self.model.get_binding('public')
         ingress_address = str(binding.network.ingress_address)
-        if self.ingress.relations:
-            lb_response = self.ingress.get_frontend_data()
-            if lb_response:
-                lb_config = lb_response[self.LB_SERVICE_NAME]
-                return [i for d in lb_config.values() for i in d['ip']][0]
-            else:
-                return ingress_address
-        else:
-            return ingress_address
+        # Prefer the VIP on the public binding; fall back to its ingress address
+        return self._get_space_vip_mapping().get(
+            'public', [ingress_address])[0]
 
     def create_share_action(self, event):
         if not self.model.unit.is_leader():
@@ -432,10 +447,7 @@ class CephNfsCharm(
         client = GaneshaNfs(self.client_name, self.pool_name)
         name = event.params.get('name')
         address = event.params.get('client')
-        mode = event.params.get('mode')
-        if mode not in ['R', 'RW']:
-            event.fail('Mode must be either R (read) or RW (read/write)')
-        res = client.grant_access(name, address, mode)
+        res = client.grant_access(name, address)
         if res is not None:
             event.fail(res)
             return
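
The new helpers map each configured VIP to the network space whose subnet contains it, so access_address() can prefer a 'public' VIP over the unit's ingress address. A minimal standalone sketch of that matching logic (addresses hypothetical):

    import ipaddress

    # One VIP per network, as the 'vip' option describes
    vips = ['10.5.100.5', '10.6.100.5']
    # Subnets of the 'public' binding, as _get_binding_subnet_map() would return them
    public_subnets = [ipaddress.ip_network('10.5.0.0/16')]
    public_vips = [vip for subnet in public_subnets
                   for vip in vips
                   if ipaddress.ip_address(vip) in subnet]
    print(public_vips)  # ['10.5.100.5']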

View File

@@ -66,15 +66,17 @@ class Export(object):
     def path(self) -> str:
         return self.export_options['EXPORT']['Path']
 
-    def add_client(self, client: str, mode: str):
-        if mode not in ['r', 'rw']:
-            return 'Mode must be either r (read) or rw (read/write)'
+    def add_client(self, client: str):
+        mode = "rw"
         clients_by_mode = self.clients_by_mode
+        logging.info(f"About to add {client} to {clients_by_mode}")
         if client not in clients_by_mode[mode.lower()]:
             clients_by_mode[mode.lower()].append(client)
+        logging.info(f"new clients_by_mode: {clients_by_mode}")
+        self.export_options['EXPORT']['CLIENT'] = []
         for (mode, clients) in clients_by_mode.items():
             if clients:
                 logging.info(f"Adding {clients} to self.export_options")
                 self.export_options['EXPORT']['CLIENT'].append(
                     {'Access_Type': mode, 'Clients': ', '.join(clients)})
@@ -188,11 +190,11 @@ class GaneshaNfs(object):
         logging.debug("Removing export file from RADOS")
         self._rados_rm('ganesha-export-{}'.format(share.export_id))
 
-    def grant_access(self, name: str, client: str, mode: str) -> Optional[str]:
+    def grant_access(self, name: str, client: str) -> Optional[str]:
         share = self.get_share(name)
         if share is None:
             return 'Share does not exist'
-        share.add_client(client, mode)
+        share.add_client(client)
         export_template = share.to_export()
         logging.debug("Export template::\n{}".format(export_template))
         tmp_file = self._tmpfile(export_template)
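
With the mode parameter dropped, add_client always grants read/write; the CLIENT list it rebuilds ends up shaped like this sketch (addresses hypothetical, structure as in the loop above):

    clients_by_mode = {'r': [], 'rw': ['10.0.0.5', '10.0.0.6']}
    client_blocks = [{'Access_Type': mode, 'Clients': ', '.join(clients)}
                     for mode, clients in clients_by_mode.items() if clients]
    print(client_blocks)  # [{'Access_Type': 'rw', 'Clients': '10.0.0.5, 10.0.0.6'}]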

View File

@@ -31,15 +31,11 @@ applications:
     num_units: 2
     options:
       source: cloud:focal-wallaby
-  loadbalancer:
-    charm: ch:openstack-loadbalancer
-    channel: latest/edge
-    num_units: 3
   hacluster:
     charm: ch:hacluster
     channel: 2.0.3/edge
     options:
-      cluster_count: 3
+      cluster_count: 2
 relations:
   - - 'ceph-mon:client'
     - 'ceph-nfs:ceph-client'
@@ -47,7 +43,5 @@ relations:
     - 'ceph-mon:osd'
   - - 'ceph-fs'
     - 'ceph-mon'
-  - - ceph-nfs
-    - loadbalancer
-  - - 'loadbalancer:ha'
-    - 'hacluster:ha'
+  - - 'ceph-nfs:ha'
+    - 'hacluster:ha'

View File

@@ -31,15 +31,11 @@ applications:
     num_units: 2
     options:
       source: cloud:focal-yoga
-  loadbalancer:
-    charm: ch:openstack-loadbalancer
-    channel: latest/edge
-    num_units: 3
   hacluster:
     charm: ch:hacluster
     channel: 2.0.3/edge
     options:
-      cluster_count: 3
+      cluster_count: 2
 relations:
   - - 'ceph-mon:client'
     - 'ceph-nfs:ceph-client'
@@ -47,7 +43,5 @@ relations:
     - 'ceph-mon:osd'
   - - 'ceph-fs'
     - 'ceph-mon'
-  - - ceph-nfs
-    - loadbalancer
-  - - 'loadbalancer:ha'
-    - 'hacluster:ha'
+  - - 'ceph-nfs:ha'
+    - 'hacluster:ha'

View File

@@ -1,4 +1,4 @@
 applications:
-  loadbalancer:
+  ceph-nfs:
     options:
       vip: '{{ TEST_VIP00 }}'

View File

@@ -66,18 +66,18 @@ class NfsGaneshaTest(unittest.TestCase):
             logging.debug("Action results: {}".format(results))
             return results
 
-    def _grant_access(self, share_name: str, access_ip: str, access_mode: str):
+    def _grant_access(self, share_name: str, access_ip: str):
         action = zaza.model.run_action_on_leader(
             'ceph-nfs',
             'grant-access',
             action_params={
                 'name': share_name,
                 'client': access_ip,
-                'mode': access_mode,
             })
         self.assertEqual(action.status, 'completed')
 
-    def _mount_share(self, unit_name: str, share_ip: str, export_path: str):
+    def _mount_share(self, unit_name: str, share_ip: str, export_path: str, retry: bool = True):
+        self._install_dependencies(unit_name)
         ssh_cmd = (
             'sudo mkdir -p {0} && '
             'sudo mount -t {1} -o nfsvers=4.1,proto=tcp {2}:{3} {0}'.format(
@@ -126,17 +126,15 @@ class NfsGaneshaTest(unittest.TestCase):
     def test_create_share(self):
         logging.info("Creating a share")
         # Todo - enable ACL testing
-        # ubuntu_0_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/0'))
-        # ubuntu_1_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/1'))
-        # share = self._create_share('test_ganesha_share', access_ip=ubuntu_0_ip)
-        share = self._create_share('test_ganesha_share')
+        ubuntu_0_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/0'))
+        ubuntu_1_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/1'))
+        share = self._create_share('test_ganesha_share', access_ip=ubuntu_0_ip)
+        # share = self._create_share('test_ganesha_share')
         zaza.model.wait_for_application_states(states={
             'ubuntu': {
                 "workload-status-message-regex": "^$",
             }
         })
-        for unit in ['0', '1']:
-            self._install_dependencies('ubuntu/{}'.format(unit))
         export_path = share['path']
         ip = share['ip']
         logging.info("Mounting share on ubuntu units")
@@ -144,12 +142,13 @@ class NfsGaneshaTest(unittest.TestCase):
         logging.info("writing to the share on ubuntu/0")
         self._write_testing_file_on_instance('ubuntu/0')
-        # Todo - enable ACL testing
-        # try:
-        #     self._mount_share('ubuntu/1', ip, export_path, retry=False)
-        #     self.fail('Mounting should not have succeeded')
-        # except: # noqa: E722
-        #     pass
-        # self._grant_access('test_ganesha_share', access_ip=ubuntu_1_ip, access_mode='RW')
+        try:
+            self._mount_share('ubuntu/1', ip, export_path, retry=False)
+            self.fail('Mounting should not have succeeded')
+        except: # noqa: E722
+            pass
+        self._grant_access('test_ganesha_share', access_ip=ubuntu_1_ip)
+        self._mount_share('ubuntu/1', ip, export_path)
         logging.info("reading from the share on ubuntu/1")
         self._verify_testing_file_on_instance('ubuntu/1')
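
The same grant flow the test exercises can be driven by hand against a deployed model; for example (share name and client address hypothetical):

    juju run-action --wait ceph-nfs/leader grant-access name=test_ganesha_share client=10.0.0.6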