Improve consistency
This change does a couple of things to improve the consistency of the code: - Replace all uses of Nfs with NFS - Remove repeated instantiation of the GaneshaNFS object - Make name a property of the Export
This commit is contained in:
parent
0461a0cc81
commit
99b1a70c1f
30
src/charm.py
30
src/charm.py
@ -32,7 +32,7 @@ import interface_ceph_nfs_peer
|
||||
import interface_hacluster.ops_ha_interface as ops_ha_interface
|
||||
|
||||
# TODO: Add the below class functionaity to action / relations
|
||||
from ganesha import GaneshaNfs
|
||||
from ganesha import GaneshaNFS
|
||||
|
||||
import ops_openstack.adapters
|
||||
import ops_openstack.core
|
||||
@ -108,7 +108,7 @@ class CephNFSAdapters(
|
||||
}
|
||||
|
||||
|
||||
class CephNfsCharm(
|
||||
class CephNFSCharm(
|
||||
ops_openstack.plugins.classes.BaseCephClientCharm):
|
||||
"""Ceph NFS Base Charm."""
|
||||
|
||||
@ -157,7 +157,7 @@ class CephNfsCharm(
|
||||
self.ceph_client = ceph_client.CephClientRequires(
|
||||
self,
|
||||
'ceph-client')
|
||||
self.peers = interface_ceph_nfs_peer.CephNfsPeers(
|
||||
self.peers = interface_ceph_nfs_peer.CephNFSPeers(
|
||||
self,
|
||||
'cluster')
|
||||
self.ha = ops_ha_interface.HAServiceRequires(self, 'ha')
|
||||
@ -242,6 +242,10 @@ class CephNfsCharm(
|
||||
def client_name(self):
|
||||
return self.app.name
|
||||
|
||||
@property
|
||||
def ganesha_client(self):
|
||||
GaneshaNFS(self.client_name, self.pool_name)
|
||||
|
||||
def request_ceph_pool(self, event):
|
||||
"""Request pools from Ceph cluster."""
|
||||
if not self.ceph_client.broker_available:
|
||||
@ -413,8 +417,7 @@ class CephNfsCharm(
|
||||
name = event.params.get('name')
|
||||
allowed_ips = event.params.get('allowed-ips')
|
||||
allowed_ips = [ip.strip() for ip in allowed_ips.split(',')]
|
||||
client = GaneshaNfs(self.client_name, self.pool_name)
|
||||
export_path = client.create_share(size=share_size, name=name, access_ips=allowed_ips)
|
||||
export_path = self.ganesha_client.create_share(size=share_size, name=name, access_ips=allowed_ips)
|
||||
if not export_path:
|
||||
event.fail("Failed to create share, check the log for more details")
|
||||
return
|
||||
@ -425,8 +428,7 @@ class CephNfsCharm(
|
||||
"ip": self.access_address()})
|
||||
|
||||
def list_shares_action(self, event):
|
||||
client = GaneshaNfs(self.client_name, self.pool_name)
|
||||
exports = client.list_shares()
|
||||
exports = self.ganesha_client.list_shares()
|
||||
event.set_results({
|
||||
"exports": [{"id": export.export_id, "name": export.name} for export in exports]
|
||||
})
|
||||
@ -435,10 +437,9 @@ class CephNfsCharm(
|
||||
if not self.model.unit.is_leader():
|
||||
event.fail("Share creation needs to be run from the application leader")
|
||||
return
|
||||
client = GaneshaNfs(self.client_name, self.pool_name)
|
||||
name = event.params.get('name')
|
||||
purge = event.params.get('purge')
|
||||
client.delete_share(name, purge=purge)
|
||||
self.ganesha_client.delete_share(name, purge=purge)
|
||||
self.peers.trigger_reload()
|
||||
event.set_results({
|
||||
"message": "Share deleted",
|
||||
@ -448,10 +449,9 @@ class CephNfsCharm(
|
||||
if not self.model.unit.is_leader():
|
||||
event.fail("Share creation needs to be run from the application leader")
|
||||
return
|
||||
client = GaneshaNfs(self.client_name, self.pool_name)
|
||||
name = event.params.get('name')
|
||||
address = event.params.get('client')
|
||||
res = client.grant_access(name, address)
|
||||
res = self.ganesha_client.grant_access(name, address)
|
||||
if res is not None:
|
||||
event.fail(res)
|
||||
return
|
||||
@ -464,10 +464,9 @@ class CephNfsCharm(
|
||||
if not self.model.unit.is_leader():
|
||||
event.fail("Share creation needs to be run from the application leader")
|
||||
return
|
||||
client = GaneshaNfs(self.client_name, self.pool_name)
|
||||
name = event.params.get('name')
|
||||
address = event.params.get('client')
|
||||
res = client.revoke_access(name, address)
|
||||
res = self.ganesha_client.revoke_access(name, address)
|
||||
if res is not None:
|
||||
event.fail(res)
|
||||
return
|
||||
@ -481,15 +480,14 @@ class CephNfsCharm(
|
||||
size = event.params.get('size')
|
||||
if size is None:
|
||||
event.fail("Size must be set")
|
||||
client = GaneshaNfs(self.client_name, self.pool_name)
|
||||
client.resize_share(name=name, size=size)
|
||||
self.ganesha_client.resize_share(name=name, size=size)
|
||||
event.set_results({
|
||||
"message": f"{name} is now {size}GB",
|
||||
})
|
||||
|
||||
|
||||
@ops_openstack.core.charm_class
|
||||
class CephNFSCharmPacific(CephNfsCharm):
|
||||
class CephNFSCharmPacific(CephNFSCharm):
|
||||
"""Ceph iSCSI Charm for Pacific."""
|
||||
|
||||
_stored = StoredState()
|
||||
|
@ -19,16 +19,12 @@ logger = logging.getLogger(__name__)
|
||||
class Export(object):
|
||||
"""Object that encodes and decodes Ganesha export blocks"""
|
||||
|
||||
name = None
|
||||
|
||||
def __init__(self, export_options: Optional[Dict] = None):
|
||||
if export_options is None:
|
||||
export_options = {}
|
||||
if isinstance(export_options, Export):
|
||||
raise RuntimeError('export_options must be a dictionary')
|
||||
self.export_options = export_options
|
||||
if self.path:
|
||||
self.name = self.path.split('/')[-2]
|
||||
if not isinstance(self.export_options['EXPORT']['CLIENT'], list):
|
||||
self.export_options['EXPORT']['CLIENT'] = [self.export_options['EXPORT']['CLIENT']]
|
||||
|
||||
@ -38,6 +34,11 @@ class Export(object):
|
||||
def to_export(self) -> str:
|
||||
return manager.mkconf(self.export_options)
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
if self.path:
|
||||
return self.path.split('/')[-2]
|
||||
|
||||
@property
|
||||
def export(self):
|
||||
return self.export_options['EXPORT']
|
||||
@ -91,7 +92,7 @@ class Export(object):
|
||||
{'Access_Type': mode, 'Clients': ', '.join(clients)})
|
||||
|
||||
|
||||
class GaneshaNfs(object):
|
||||
class GaneshaNFS(object):
|
||||
|
||||
export_index = "ganesha-export-index"
|
||||
export_counter = "ganesha-export-counter"
|
||||
|
@ -26,15 +26,15 @@ class DepartedEvent(EventBase):
|
||||
pass
|
||||
|
||||
|
||||
class CephNfsPeerEvents(ObjectEvents):
|
||||
class CephNFSPeerEvents(ObjectEvents):
|
||||
pool_initialised = EventSource(PoolInitialisedEvent)
|
||||
reload_nonce = EventSource(ReloadNonceEvent)
|
||||
departing = EventSource(DepartedEvent)
|
||||
|
||||
|
||||
class CephNfsPeers(Object):
|
||||
class CephNFSPeers(Object):
|
||||
|
||||
on = CephNfsPeerEvents()
|
||||
on = CephNFSPeerEvents()
|
||||
_stored = StoredState()
|
||||
|
||||
def __init__(self, charm, relation_name):
|
||||
@ -52,7 +52,7 @@ class CephNfsPeers(Object):
|
||||
self.on_departed)
|
||||
|
||||
def on_changed(self, event):
|
||||
logging.info("CephNfsPeers on_changed")
|
||||
logging.info("CephNFSPeers on_changed")
|
||||
logging.debug('pool_initialised: {}'.format(self.pool_initialised))
|
||||
if self.pool_initialised == 'True' and not self._stored.pool_initialised:
|
||||
logging.info("emiting pool initialised")
|
||||
@ -65,7 +65,7 @@ class CephNfsPeers(Object):
|
||||
self._stored.reload_nonce = self.reload_nonce
|
||||
|
||||
def on_departed(self, event):
|
||||
logging.warning("CephNfsPeers on_departed")
|
||||
logging.warning("CephNFSPeers on_departed")
|
||||
if self.this_unit.name == os.getenv('JUJU_DEPARTING_UNIT'):
|
||||
self.on.departing.emit()
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user