Add action to create a new share
commit 96e59cfe58
parent c9817a4b17
@@ -7,4 +7,6 @@ create-share:
     allowed-ips:
       description: IP Addresses to grant Read/Write access to
       type: string
       default: "0.0.0.0"
+# TODO: CephFS Share name
+# TODO: Update, delete share
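ops derives action event names by swapping hyphens for underscores, so the create-share action declared above surfaces as self.on.create_share_action in src/charm.py below. The allowed-ips parameter is declared but not yet consumed by the new handler; a hedged sketch of how a follow-up revision might read it (the grant_access call is hypothetical, matching the TODOs):

    def create_share_action(self, event):
        # Action parameters keep their hyphenated names in event.params.
        allowed_ips = event.params.get('allowed-ips', '0.0.0.0')
        client = GaneshaNfs(self.client_name, self.pool_name)
        export_path = client.create_share()
        # Hypothetical follow-up: restrict the export to the requested clients.
        # client.grant_access(export_path, allowed_ips)
        event.set_results({'path': export_path})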
@@ -58,3 +58,9 @@ options:
     description: |
       Optionally specify an existing pool that Ganesha should store recovery
       data into. Defaults to the application's name.
+  vip:
+    type: string
+    default:
+    description: |
+      VIP to associate with this service. This VIP will only be functional
+      with a relation to the hacluster charm.
@@ -19,6 +19,9 @@ extra-bindings:
 requires:
   ceph-client:
     interface: ceph-client
+  hacluster:
+    interface: hacluster
+    scope: container
 peers:
   cluster:
     interface: ceph-nfs-peer

src/charm.py
@@ -28,7 +28,7 @@ import charmhelpers.core.templating as ch_templating
 import interface_ceph_client.ceph_client as ceph_client
 import interface_ceph_nfs_peer
-# TODO: Add the below class functionaity to action / relations
-# from ganesha import GaneshaNfs
+from ganesha import GaneshaNfs

 import ops_openstack.adapters
 import ops_openstack.core
@@ -178,6 +178,10 @@ class CephNfsCharm(
         self.framework.observe(
             self.peers.on.reload_nonce,
             self.on_reload_nonce)
+        # Actions
+        self.framework.observe(
+            self.on.create_share_action,
+            self.create_share_action)

     def config_get(self, key, default=None):
         """Retrieve config option.
@@ -265,14 +269,14 @@ class CephNfsCharm(
         self._stored.is_started = True
         self.update_status()
         logging.info("on_pools_available: status updated")

     def setup_ganesha(self, event):
         if not self._stored.is_cluster_setup:
             subprocess.check_call([
                 'ganesha-rados-grace', '--userid', self.client_name,
                 '--cephconf', self.CEPH_CONF, '--pool', self.pool_name,
                 'add', socket.gethostname()])
             self._stored.is_cluster_setup = True

     def setup_ganesha(self, event):
         if not self.model.unit.is_leader():
             return
         cmd = [
@@ -293,7 +297,7 @@ class CephNfsCharm(
                 'put', 'ganesha-export-counter', counter.name
             ]
             subprocess.check_call(cmd)
-            self.peers.pool_initialised()
+            self.peers.initialised_pool()
         except subprocess.CalledProcessError:
             logging.error("Failed to setup ganesha index object")
             event.defer()
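Deferring here is what makes pool setup resilient: event.defer() re-queues the event, and ops re-emits deferred events at the start of the next dispatch, so a rados failure while the Ceph cluster is still settling is retried rather than lost. A generic sketch of the pattern:

    def setup_handler(self, event):
        try:
            # may fail while the Ceph cluster is still settling
            subprocess.check_call(['rados', '-p', 'mypool', 'put', 'obj', '/dev/null'])
        except subprocess.CalledProcessError:
            event.defer()  # re-emitted by ops before the next event is handled
            return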
@@ -309,6 +313,22 @@ class CephNfsCharm(
         logging.info("Reloading Ganesha after nonce triggered reload")
         subprocess.call(['killall', '-HUP', 'ganesha.nfsd'])

+    def access_address(self) -> str:
+        """Return the IP to advertise Ganesha on"""
+        binding = self.model.get_binding('public')
+        if self.model.get_relation('hacluster'):
+            return self.config_get('vip')
+        else:
+            return str(binding.network.ingress_address)
+
+    def create_share_action(self, event):
+        if not self.model.unit.is_leader():
+            event.fail("Share creation needs to be run from the application leader")
+            return
+        client = GaneshaNfs(self.client_name, self.pool_name)
+        export_path = client.create_share()
+        self.peers.trigger_reload()
+        event.set_results({"message": "Share created", "path": export_path, "ip": self.access_address()})
+
 @ops_openstack.core.charm_class
 class CephNFSCharmOcto(CephNfsCharm):
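create_share_action leans on the GaneshaNfs class that the import change above enables. Its definition lives in the ganesha module and is mostly outside this diff; a minimal sketch of the surface the handler assumes, with the signature inferred from the calls above and the body purely illustrative:

    class GaneshaNfs:
        """Wrapper around Ganesha export management (sketch only)."""

        def __init__(self, client_name, ceph_pool):
            self.client_name = client_name
            self.ceph_pool = ceph_pool

        def create_share(self) -> str:
            """Create a CephFS-backed export and return its pseudo path."""
            ...  # allocate an export ID, write the export block, return the path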
@@ -134,7 +134,7 @@ class GaneshaNfs(object):
     def _ceph_command(self, *cmd):
         """Run a ceph command"""
         cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ceph.conf"] + [*cmd]
-        return subprocess.check_output(cmd)
+        return subprocess.check_output(cmd, stderr=subprocess.DEVNULL)

     def _get_next_export_id(self):
         """Retrieve the next available export ID, and update the rados key
@@ -45,19 +45,27 @@ class CephNfsPeers(Object):
         if self.pool_initialised == 'True' and not self._stored.pool_initialised:
             self.on.pool_initialised.emit()
             self._stored.pool_initialised = True
-        if self._stored.reload_nonce != self.reload_nonce():
+        if self._stored.reload_nonce != self.reload_nonce:
             self.on.reload_nonce.emit()
-            self._stored.reload_nonce = self.reload_nonce()
+            self._stored.reload_nonce = self.reload_nonce

-    def pool_initialised(self):
+    def initialised_pool(self):
         logging.info("Setting pool initialised")
         self.peer_rel.data[self.peer_rel.app]['pool_initialised'] = 'True'
         self.on.pool_initialised.emit()

     def trigger_reload(self):
-        self.peer_rel.data[self.peer_rel.app]['reload_nonce'] = uuid.uuid4()
+        self.peer_rel.data[self.peer_rel.app]['reload_nonce'] = str(uuid.uuid4())
         self.on.reload_nonce.emit()

+    @property
+    def pool_initialised(self):
+        return self.peer_rel.data[self.peer_rel.app].get('pool_initialised')
+
+    @property
+    def reload_nonce(self):
+        return self.peer_rel.data[self.peer_rel.app].get('reload_nonce')
+
     @property
     def peer_rel(self):
         return self.framework.model.get_relation(self.relation_name)
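The str(uuid.uuid4()) fix in trigger_reload is not cosmetic: Juju relation data bags only store strings, and ops rejects other value types when writing, so the nonce must be serialized first. In isolation:

    import uuid

    data_bag = {}                         # stands in for peer_rel.data[app]
    nonce = uuid.uuid4()
    # data_bag['reload_nonce'] = nonce    # a real data bag rejects non-str values
    data_bag['reload_nonce'] = str(nonce)

The accompanying rename of the writer method to initialised_pool, and the dropped call parentheses in the update handler, follow from pool_initialised and reload_nonce becoming read-only properties over the same data bag.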