diff --git a/actions.yaml b/actions.yaml index a2a9877..adef146 100644 --- a/actions.yaml +++ b/actions.yaml @@ -34,10 +34,6 @@ grant-access: description: IP address or network to change access for type: string default: - mode: - description: Access mode to grant - type: string - default: "RW" revoke-access: description: | diff --git a/config.yaml b/config.yaml index 2f373e2..a02c7a1 100644 --- a/config.yaml +++ b/config.yaml @@ -62,5 +62,7 @@ options: type: string default: description: | - VIP to associate with this service. This VIP will only be functional - with a relation to the hacluster charm. \ No newline at end of file + Virtual IP(s) to use to front API services in HA configuration. + . + If multiple networks are being used, a VIP should be provided for each + network, separated by spaces. diff --git a/metadata.yaml b/metadata.yaml index 04ccef9..6f402dc 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -19,8 +19,9 @@ extra-bindings: requires: ceph-client: interface: ceph-client - loadbalancer: - interface: openstack-loadbalancer + ha: + interface: hacluster + scope: container peers: cluster: interface: ceph-nfs-peer \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index ea346e7..cda466a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,4 +3,4 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers git+https://github.com/canonical/operator.git#egg=ops git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client git+https://github.com/ChrisMacNaughton/charm-ops-openstack.git@feature/charm-instance-to-relation-adapter#egg=ops_openstack -git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer +git+https://opendev.org/openstack/charm-interface-hacluster#egg=interface_hacluster diff --git a/src/charm.py b/src/charm.py index acc5efc..e505ecf 100755 --- a/src/charm.py +++ b/src/charm.py @@ -12,6 +12,7 @@ develop a new k8s charm 
using the Operator Framework: https://discourse.charmhub.io/t/4208 """ +import ipaddress import logging import os from pathlib import Path @@ -28,7 +29,7 @@ import charmhelpers.core.templating as ch_templating import interface_ceph_client.ceph_client as ceph_client import interface_ceph_nfs_peer -import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface +import interface_hacluster.ops_ha_interface as ops_ha_interface # TODO: Add the below class functionaity to action / relations from ganesha import GaneshaNfs @@ -159,9 +160,8 @@ class CephNfsCharm( self.peers = interface_ceph_nfs_peer.CephNfsPeers( self, 'cluster') - self.ingress = ops_lb_interface.OSLoadbalancerRequires( - self, - 'loadbalancer') + self.ha = ops_ha_interface.HAServiceRequires(self, 'ha') + self.adapters = CephNFSAdapters( (self.ceph_client, self.peers), contexts=(CephNFSContext(self),), @@ -191,11 +191,8 @@ class CephNfsCharm( self.peers.on.reload_nonce, self.on_reload_nonce) self.framework.observe( - self.ingress.on.lb_relation_ready, - self._request_loadbalancer) - self.framework.observe( - self.ingress.on.lb_configured, - self.render_config) + self.ha.on.ha_ready, + self._configure_hacluster) # Actions self.framework.observe( self.on.create_share_action, @@ -216,15 +213,6 @@ class CephNfsCharm( self.revoke_access_action ) - def _request_loadbalancer(self, _) -> None: - """Send request to create loadbalancer""" - self.ingress.request_loadbalancer( - self.LB_SERVICE_NAME, - self.NFS_PORT, - self.NFS_PORT, - self._get_bind_ip(), - 'tcp') - def _get_bind_ip(self) -> str: """Return the IP to bind the dashboard to""" binding = self.model.get_binding('public') @@ -361,6 +349,18 @@ class CephNfsCharm( logging.error("Failed to setup ganesha index object") event.defer() + def _configure_hacluster(self, _): + vip_config = self.config.get('vip') + if not vip_config: + logging.warning("Cannot setup vips, vip config missing") + return + for vip in vip_config.split(): + self.ha.add_vip('ganesha-vip', vip)
+        self.ha.add_systemd_service('ganesha-systemd', 'nfs-ganesha') + self.ha.add_colocation( + self.model.app.name, 'ALWAYS', ['ganesha-vip', 'ganesha-systemd']) + self.ha.bind_resources() + def on_pool_initialised(self, event): try: logging.debug("Restarting Ganesha after pool initialisation") @@ -373,19 +373,34 @@ class CephNfsCharm( logging.info("Reloading Ganesha after nonce triggered reload") subprocess.call(['killall', '-HUP', 'ganesha.nfsd']) + def _get_binding_subnet_map(self): + bindings = {} + for binding_name in self.meta.extra_bindings.keys(): + network = self.model.get_binding(binding_name).network + bindings[binding_name] = [i.subnet for i in network.interfaces] + return bindings + + @property + def vips(self): + return (self.config.get('vip') or '').split() + + def _get_space_vip_mapping(self): + bindings = {} + for binding_name, subnets in self._get_binding_subnet_map().items(): + bindings[binding_name] = [ + vip + for subnet in subnets + for vip in self.vips + if ipaddress.ip_address(vip) in subnet] + return bindings + def access_address(self) -> str: """Return the IP to advertise Ganesha on""" binding = self.model.get_binding('public') ingress_address = str(binding.network.ingress_address) - if self.ingress.relations: - lb_response = self.ingress.get_frontend_data() - if lb_response: - lb_config = lb_response[self.LB_SERVICE_NAME] - return [i for d in lb_config.values() for i in d['ip']][0] - else: - return ingress_address - else: - return ingress_address + # Prefer the VIP on the public binding; fall back to its ingress address + return self._get_space_vip_mapping().get( + 'public', [ingress_address])[0] def create_share_action(self, event): if not self.model.unit.is_leader(): @@ -432,10 +447,7 @@ class CephNfsCharm( client = GaneshaNfs(self.client_name, self.pool_name) name = event.params.get('name') address = event.params.get('client') - mode = event.params.get('mode') - if mode not in ['R', 'RW']: - event.fail('Mode must be either R (read) or RW
(read/write)') - res = client.grant_access(name, address, mode) + res = client.grant_access(name, address) if res is not None: event.fail(res) return diff --git a/src/ganesha.py b/src/ganesha.py index 9d9a561..196cf82 100644 --- a/src/ganesha.py +++ b/src/ganesha.py @@ -66,15 +66,17 @@ class Export(object): def path(self) -> str: return self.export_options['EXPORT']['Path'] - def add_client(self, client: str, mode: str): - if mode not in ['r', 'rw']: - return 'Mode must be either r (read) or rw (read/write)' + def add_client(self, client: str): + mode = "rw" clients_by_mode = self.clients_by_mode + logging.info(f"About to add {client} to {clients_by_mode}") if client not in clients_by_mode[mode.lower()]: clients_by_mode[mode.lower()].append(client) + logging.info(f"new clients_by_mode: to {clients_by_mode}") self.export_options['EXPORT']['CLIENT'] = [] for (mode, clients) in clients_by_mode.items(): if clients: + logging.info(f"Adding {clients} to self.export_options") self.export_options['EXPORT']['CLIENT'].append( {'Access_Type': mode, 'Clients': ', '.join(clients)}) @@ -188,11 +190,11 @@ class GaneshaNfs(object): logging.debug("Removing export file from RADOS") self._rados_rm('ganesha-export-{}'.format(share.export_id)) - def grant_access(self, name: str, client: str, mode: str) -> Optional[str]: + def grant_access(self, name: str, client: str) -> Optional[str]: share = self.get_share(name) if share is None: return 'Share does not exist' - share.add_client(client, mode) + share.add_client(client) export_template = share.to_export() logging.debug("Export template::\n{}".format(export_template)) tmp_file = self._tmpfile(export_template) diff --git a/tests/bundles/focal-pacific.yaml b/tests/bundles/focal-pacific.yaml index 655107a..faad303 100644 --- a/tests/bundles/focal-pacific.yaml +++ b/tests/bundles/focal-pacific.yaml @@ -31,15 +31,11 @@ applications: num_units: 2 options: source: cloud:focal-wallaby - loadbalancer: - charm: ch:openstack-loadbalancer - 
channel: latest/edge - num_units: 3 hacluster: charm: ch:hacluster channel: 2.0.3/edge options: - cluster_count: 3 + cluster_count: 2 relations: - - 'ceph-mon:client' - 'ceph-nfs:ceph-client' @@ -47,7 +43,5 @@ relations: - 'ceph-mon:osd' - - 'ceph-fs' - 'ceph-mon' - - - ceph-nfs - - loadbalancer - - - 'loadbalancer:ha' + - - 'ceph-nfs:ha' - 'hacluster:ha' diff --git a/tests/bundles/focal-quincy.yaml b/tests/bundles/focal-quincy.yaml index 9cd77a2..07c7896 100644 --- a/tests/bundles/focal-quincy.yaml +++ b/tests/bundles/focal-quincy.yaml @@ -31,15 +31,11 @@ applications: num_units: 2 options: source: cloud:focal-yoga - loadbalancer: - charm: ch:openstack-loadbalancer - channel: latest/edge - num_units: 3 hacluster: charm: ch:hacluster channel: 2.0.3/edge options: - cluster_count: 3 + cluster_count: 2 relations: - - 'ceph-mon:client' - 'ceph-nfs:ceph-client' @@ -47,7 +43,5 @@ relations: - 'ceph-mon:osd' - - 'ceph-fs' - 'ceph-mon' - - - ceph-nfs - - loadbalancer - - - 'loadbalancer:ha' + - - 'ceph-nfs:ha' - 'hacluster:ha' diff --git a/tests/bundles/overlays/local-charm-overlay.yaml.j2 b/tests/bundles/overlays/local-charm-overlay.yaml.j2 index fa52dfc..a8bed22 100644 --- a/tests/bundles/overlays/local-charm-overlay.yaml.j2 +++ b/tests/bundles/overlays/local-charm-overlay.yaml.j2 @@ -1,4 +1,4 @@ applications: - loadbalancer: + ceph-nfs: options: vip: '{{ TEST_VIP00 }}' diff --git a/tests/nfs_ganesha.py b/tests/nfs_ganesha.py index 2545876..577d109 100644 --- a/tests/nfs_ganesha.py +++ b/tests/nfs_ganesha.py @@ -66,18 +66,18 @@ class NfsGaneshaTest(unittest.TestCase): logging.debug("Action results: {}".format(results)) return results - def _grant_access(self, share_name: str, access_ip: str, access_mode: str): + def _grant_access(self, share_name: str, access_ip: str): action = zaza.model.run_action_on_leader( 'ceph-nfs', 'grant-access', action_params={ 'name': share_name, 'client': access_ip, - 'mode': access_mode, }) self.assertEqual(action.status, 'completed') def 
_mount_share(self, unit_name: str, share_ip: str, export_path: str, retry: bool = True): + self._install_dependencies(unit_name) ssh_cmd = ( 'sudo mkdir -p {0} && ' 'sudo mount -t {1} -o nfsvers=4.1,proto=tcp {2}:{3} {0}'.format( @@ -126,17 +126,15 @@ class NfsGaneshaTest(unittest.TestCase): def test_create_share(self): logging.info("Creating a share") # Todo - enable ACL testing - # ubuntu_0_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/0')) - # ubuntu_1_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/1')) - # share = self._create_share('test_ganesha_share', access_ip=ubuntu_0_ip) - share = self._create_share('test_ganesha_share') + ubuntu_0_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/0')) + ubuntu_1_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/1')) + share = self._create_share('test_ganesha_share', access_ip=ubuntu_0_ip) + # share = self._create_share('test_ganesha_share') zaza.model.wait_for_application_states(states={ 'ubuntu': { "workload-status-message-regex": "^$", } }) - for unit in ['0', '1']: - self._install_dependencies('ubuntu/{}'.format(unit)) export_path = share['path'] ip = share['ip'] logging.info("Mounting share on ubuntu units") @@ -144,12 +142,13 @@ class NfsGaneshaTest(unittest.TestCase): logging.info("writing to the share on ubuntu/0") self._write_testing_file_on_instance('ubuntu/0') # Todo - enable ACL testing - # try: - # self._mount_share('ubuntu/1', ip, export_path, retry=False) - # self.fail('Mounting should not have succeeded') - # except: # noqa: E722 - # pass - # self._grant_access('test_ganesha_share', access_ip=ubuntu_1_ip, access_mode='RW') + try: + self._mount_share('ubuntu/1', ip, export_path, retry=False) + self.fail('Mounting should not have succeeded') + except: # noqa: E722 + pass + self._grant_access('test_ganesha_share', access_ip=ubuntu_1_ip) + self._mount_share('ubuntu/1', ip, export_path) 
logging.info("reading from the share on ubuntu/1") self._verify_testing_file_on_instance('ubuntu/1')