From 96e59cfe582ab95bfe7afa11cabe7a09fc6be81d Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Fri, 7 Jan 2022 12:57:36 -0600
Subject: [PATCH] Add action to create a new share

---
 actions.yaml                   |  4 +++-
 config.yaml                    |  6 ++++++
 metadata.yaml                  |  3 +++
 src/charm.py                   | 28 ++++++++++++++++++++++++----
 src/ganesha.py                 |  2 +-
 src/interface_ceph_nfs_peer.py | 16 ++++++++++++----
 6 files changed, 49 insertions(+), 10 deletions(-)

diff --git a/actions.yaml b/actions.yaml
index 3e15c29..4b55f8f 100644
--- a/actions.yaml
+++ b/actions.yaml
@@ -7,4 +7,6 @@ create-share:
     allowed-ips:
       description: IP Addresses to grant Read/Write access to
       type: string
-      default: "0.0.0.0"
\ No newline at end of file
+      default: "0.0.0.0"
+  # TODO: CephFS Share name
+# TODO: Update, delete share
\ No newline at end of file
diff --git a/config.yaml b/config.yaml
index 9a6b986..3c53bb4 100644
--- a/config.yaml
+++ b/config.yaml
@@ -58,3 +58,9 @@ options:
     description: |
       Optionally specify an existing pool that Ganesha should store
       recovery data into. Defaults to the application's name.
+  vip:
+    type: string
+    default:
+    description: |
+      VIP to associate with this service. This VIP will only be functional
+      with a relation to the hacluster charm.
\ No newline at end of file
diff --git a/metadata.yaml b/metadata.yaml
index 94c0284..4128ea9 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -19,6 +19,9 @@ extra-bindings:
 requires:
   ceph-client:
     interface: ceph-client
+  hacluster:
+    interface: hacluster
+    scope: container
 peers:
   cluster:
     interface: ceph-nfs-peer
\ No newline at end of file
diff --git a/src/charm.py b/src/charm.py
index 9682a76..9caafe1 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -28,7 +28,7 @@ import charmhelpers.core.templating as ch_templating
 import interface_ceph_client.ceph_client as ceph_client
 import interface_ceph_nfs_peer
 # TODO: Add the below class functionaity to action / relations
-# from ganesha import GaneshaNfs
+from ganesha import GaneshaNfs
 
 import ops_openstack.adapters
 import ops_openstack.core
@@ -178,6 +178,10 @@ class CephNfsCharm(
         self.framework.observe(
             self.peers.on.reload_nonce,
             self.on_reload_nonce)
+        # Actions
+        self.framework.observe(
+            self.on.create_share_action,
+            self.create_share_action)
 
     def config_get(self, key, default=None):
         """Retrieve config option.
@@ -265,14 +269,14 @@ class CephNfsCharm(
         self._stored.is_started = True
         self.update_status()
         logging.info("on_pools_available: status updated")
+
+    def setup_ganesha(self, event):
         if not self._stored.is_cluster_setup:
             subprocess.check_call([
                 'ganesha-rados-grace', '--userid', self.client_name,
                 '--cephconf', self.CEPH_CONF, '--pool', self.pool_name,
                 'add', socket.gethostname()])
             self._stored.is_cluster_setup = True
-
-    def setup_ganesha(self, event):
         if not self.model.unit.is_leader():
             return
         cmd = [
@@ -293,7 +297,7 @@ class CephNfsCharm(
                 'put', 'ganesha-export-counter', counter.name
             ]
             subprocess.check_call(cmd)
-            self.peers.pool_initialised()
+            self.peers.initialised_pool()
         except subprocess.CalledProcessError:
             logging.error("Failed to setup ganesha index object")
             event.defer()
@@ -309,6 +313,22 @@ class CephNfsCharm(
         logging.info("Reloading Ganesha after nonce triggered reload")
         subprocess.call(['killall', '-HUP', 'ganesha.nfsd'])
 
+    def access_address(self) -> str:
+        """Return the IP to advertise Ganesha on"""
+        binding = self.model.get_binding('public')
+        if self.model.get_relation('hacluster'):
+            return self.config_get('vip')
+        else:
+            return str(binding.network.ingress_address)
+
+    def create_share_action(self, event):
+        if not self.model.unit.is_leader():
+            event.fail("Share creation needs to be run from the application leader")
+            return
+        client = GaneshaNfs(self.client_name, self.pool_name)
+        export_path = client.create_share()
+        self.peers.trigger_reload()
+        event.set_results({"message": "Share created", "path": export_path, "ip": self.access_address()})
 
 @ops_openstack.core.charm_class
 class CephNFSCharmOcto(CephNfsCharm):
diff --git a/src/ganesha.py b/src/ganesha.py
index daa4268..d3f9ad0 100644
--- a/src/ganesha.py
+++ b/src/ganesha.py
@@ -134,7 +134,7 @@ class GaneshaNfs(object):
     def _ceph_command(self, *cmd):
         """Run a ceph command"""
         cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ceph.conf"] + [*cmd]
-        return subprocess.check_output(cmd)
+        return subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
 
     def _get_next_export_id(self):
         """Retrieve the next available export ID, and update the rados key
diff --git a/src/interface_ceph_nfs_peer.py b/src/interface_ceph_nfs_peer.py
index ff10c5e..e38325d 100644
--- a/src/interface_ceph_nfs_peer.py
+++ b/src/interface_ceph_nfs_peer.py
@@ -45,19 +45,27 @@ class CephNfsPeers(Object):
         if self.pool_initialised == 'True' and not self._stored.pool_initialised:
             self.on.pool_initialised.emit()
             self._stored.pool_initialised = True
-        if self._stored.reload_nonce != self.reload_nonce():
+        if self._stored.reload_nonce != self.reload_nonce:
             self.on.reload_nonce.emit()
-            self._stored.reload_nonce = self.reload_nonce()
+            self._stored.reload_nonce = self.reload_nonce
 
-    def pool_initialised(self):
+    def initialised_pool(self):
         logging.info("Setting pool initialised")
         self.peer_rel.data[self.peer_rel.app]['pool_initialised'] = 'True'
         self.on.pool_initialised.emit()
 
     def trigger_reload(self):
-        self.peer_rel.data[self.peer_rel.app]['reload_nonce'] = uuid.uuid4()
+        self.peer_rel.data[self.peer_rel.app]['reload_nonce'] = str(uuid.uuid4())
         self.on.reload_nonce.emit()
 
+    @property
+    def pool_initialised(self):
+        return self.peer_rel.data[self.peer_rel.app].get('pool_initialised')
+
+    @property
+    def reload_nonce(self):
+        return self.peer_rel.data[self.peer_rel.app].get('reload_nonce')
+
     @property
     def peer_rel(self):
         return self.framework.model.get_relation(self.relation_name)
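
---
Usage sketch (not part of the diff): with this patch applied, the new
action is driven from the Juju CLI. The application and unit names below
("ceph-nfs", "ceph-nfs/0") and the example IPs are assumptions for
illustration; the action must be run on the application leader.

    # Create a share; allowed-ips is the parameter declared in actions.yaml.
    juju run-action --wait ceph-nfs/0 create-share allowed-ips="10.0.0.10"

    # Optionally set the new vip option and relate to hacluster, so that
    # access_address() advertises the VIP instead of the unit address.
    juju config ceph-nfs vip="10.0.0.100"
    juju add-relation ceph-nfs hacluster

The action result carries the message, export path, and advertised IP, as
populated by event.set_results() in create_share_action().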