Update to get Ganesha set up.

This change also includes a new helper class to create
CephFS shares that are then provided by Ganesha.
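A minimal usage sketch of the new helper (hypothetical, for illustration only: the
client and pool names below are placeholders, and nothing in this commit wires
create_share() up to an action yet):

from ganesha import GaneshaNfs

# Create one CephFS-backed share and register its export with the running Ganesha.
helper = GaneshaNfs(client_name='ceph-nfs', ceph_pool='ceph-nfs')
helper.create_share()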
Chris MacNaughton 2022-01-05 20:10:57 +01:00
parent b8e7c454ae
commit 3b5c303aa7
13 changed files with 332 additions and 141 deletions

.gitignore

@@ -4,5 +4,4 @@ __pycache__
.stestr/
lib/*
!lib/README.txt
build
ceph-iscsi.charm
*.charm


@@ -8,10 +8,25 @@
# Learn more about config at: https://juju.is/docs/sdk/config
options:
thing:
default: 🎁
description: A thing used by the charm.
source:
type: string
default: ppa:chris.macnaughton/focal-ussuri
description: |
Optional configuration to support use of additional sources such as:
- ppa:myteam/ppa
- cloud:trusty-proposed/kilo
- http://my.archive.com/ubuntu main
The last option should be used in conjunction with the key configuration
option.
Note that a minimum ceph version of 0.48.2 is required for use with this
charm which is NOT provided by the packages in the main Ubuntu archive
for precise but is provided in the Ubuntu cloud archive.
key:
type: string
default:
description: |
Key ID to import to the apt keyring to support use with arbitrary source
configuration from outside of Launchpad archives or PPAs.
ceph-osd-replication-count:
type: int
default: 3
@@ -41,5 +56,5 @@ options:
default:
type: string
description: |
Optionally specify an existing pool that shares should map to. Defaults
to the application's name.
Optionally specify an existing pool that Ganesha should store recovery
data into. Defaults to the application's name.


@@ -5,7 +5,6 @@ description: |
The NFS gateway is provided by NFS-Ganesha and provides NFS shares
that are backed by CephFS.
tags:
- openstack
- storage
- misc
series:
@@ -20,3 +19,6 @@ extra-bindings:
requires:
ceph-client:
interface: ceph-client
peers:
cluster:
interface: ceph-nfs-peer


@@ -2,4 +2,4 @@
git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
git+https://github.com/canonical/operator.git#egg=ops
git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client
git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack
git+https://github.com/ChrisMacNaughton/charm-ops-openstack.git@feature/charm-instance-to-relation-adapter#egg=ops_openstack


@@ -15,9 +15,9 @@ develop a new k8s charm using the Operator Framework:
import logging
import os
from pathlib import Path
import socket
import subprocess
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.main import main
# from ops.model import ActiveStatus
@@ -25,6 +25,9 @@ from ops.main import main
import charmhelpers.core.host as ch_host
import charmhelpers.core.templating as ch_templating
import interface_ceph_client.ceph_client as ceph_client
import interface_ceph_nfs_peer
# TODO: Add the below class functionality to actions / relations
# from ganesha import GaneshaNfs
import ops_openstack.adapters
import ops_openstack.core
@@ -65,6 +68,32 @@ class CephClientAdapter(ops_openstack.adapters.OpenStackOperRelationAdapter):
return self.relation.get_relation_data()['key']
class CephNFSContext(object):
"""Adapter for ceph NFS config."""
name = 'ceph_nfs'
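# 'name' is the key the templates use to reference this context, e.g.
# {{ ceph_nfs.client_name }} in ceph.conf and {{ ceph_nfs.pool_name }} in ganesha.conf.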
def __init__(self, charm_instance):
self.charm_instance = charm_instance
@property
def pool_name(self):
"""The name of the default rbd data pool to be used for shares.
:returns: Data pool name.
:rtype: str
"""
return self.charm_instance.config_get('rbd-pool-name', self.charm_instance.app.name)
@property
def client_name(self):
return self.charm_instance.app.name
@property
def hostname(self):
return socket.gethostname()
class CephNFSAdapters(
ops_openstack.adapters.OpenStackRelationAdapters):
"""Collection of relation adapters."""
@@ -74,13 +103,14 @@ class CephNFSAdapters(
}
class CephNfsCharm(CharmBase):
class CephNfsCharm(
ops_openstack.plugins.classes.BaseCephClientCharm):
"""Ceph NFS Base Charm."""
_stored = StoredState()
PACKAGES = ['nfs-ganesha', 'ceph-common']
PACKAGES = ['nfs-ganesha-ceph', 'nfs-ganesha-rados-grace', 'ceph-common']
CEPH_CAPABILITIES = [
"mgr", "allow rw",
"mds", "allow *",
"osd", "allow rw",
"mon", "allow r, "
@@ -89,14 +119,14 @@ class CephNfsCharm(CharmBase):
"allow command \"auth get\", "
"allow command \"auth get-or-create\""]
REQUIRED_RELATIONS = ['ceph-client', 'cluster']
REQUIRED_RELATIONS = ['ceph-client']
CEPH_CONFIG_PATH = Path('/etc/ceph')
GANESHA_CONFIG_PATH = Path('/etc/ganesha')
CEPH_GANESHA_CONFIG_PATH = CEPH_CONFIG_PATH / 'ganesha'
CEPH_CONF = CEPH_GANESHA_CONFIG_PATH / 'ceph.conf'
GANESHA_KEYRING = CEPH_GANESHA_CONFIG_PATH / 'ceph.client.ceph-ganesha.keyring'
CEPH_CONF = CEPH_CONFIG_PATH / 'ceph.conf'
GANESHA_KEYRING = CEPH_GANESHA_CONFIG_PATH / 'ceph.keyring'
GANESHA_CONF = GANESHA_CONFIG_PATH / 'ganesha.conf'
SERVICES = ['nfs-ganesha']
@@ -114,13 +144,18 @@ class CephNfsCharm(CharmBase):
logging.info("Using %s class", self.release)
self._stored.set_default(
is_started=False,
is_cluster_setup=False
)
self.ceph_client = ceph_client.CephClientRequires(
self,
'ceph-client')
self.peers = interface_ceph_nfs_peer.CephNfsPeers(
self,
'cluster')
self.adapters = CephNFSAdapters(
(self.ceph_client,),
self)
(self.ceph_client, self.peers),
contexts=(CephNFSContext(self),),
charm_instance=self)
self.framework.observe(
self.ceph_client.on.broker_available,
self.request_ceph_pool)
@@ -133,14 +168,20 @@ class CephNfsCharm(CharmBase):
self.framework.observe(
self.on.upgrade_charm,
self.render_config)
self.framework.observe(
self.ceph_client.on.pools_available,
self.setup_ganesha)
self.framework.observe(
self.peers.on.pool_initialised,
self.on_pool_initialised)
def config_get(self, key):
def config_get(self, key, default=None):
"""Retrieve config option.
:returns: Value of the corresponding config option, or the supplied default if unset.
:rtype: Any
"""
return self.model.config.get(key)
return self.model.config.get(key, default)
@property
def pool_name(self):
@@ -149,11 +190,7 @@ class CephNfsCharm(CharmBase):
:returns: Data pool name.
:rtype: str
"""
if self.config_get('rbd-pool-name'):
pool_name = self.config_get('rbd-pool-name')
else:
pool_name = self.app.name
return pool_name
return self.config_get('rbd-pool-name', self.app.name)
@property
def client_name(self):
@@ -180,6 +217,7 @@ class CephNfsCharm(CharmBase):
logging.info("Requesting replicated pool")
self.ceph_client.create_replicated_pool(
name=self.pool_name,
app_name='ganesha',
replicas=replicas,
weight=weight,
**bcomp_kwargs)
@@ -200,7 +238,7 @@ class CephNfsCharm(CharmBase):
event.defer()
return
self.CEPH_GANESHA_PATH.mkdir(
self.CEPH_GANESHA_CONFIG_PATH.mkdir(
exist_ok=True,
mode=0o750)
@@ -223,16 +261,35 @@ class CephNfsCharm(CharmBase):
self._stored.is_started = True
self.update_status()
logging.info("on_pools_available: status updated")
if not self._stored.is_cluster_setup:
subprocess.check_call([
'ganesha-rados-grace', '--userid', self.client_name,
'--cephconf', '/etc/ceph/ganesha/ceph.conf', '--pool', self.pool_name,
'add', socket.gethostname()])
self._stored.is_cluster_setup = True
# def custom_status_check(self):
# """Custom update status checks."""
# if ch_host.is_container():
# return ops.model.BlockedStatus(
# 'Charm cannot be deployed into a container')
# if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS:
# return ops.model.BlockedStatus(
# '{} is an invalid unit count'.format(self.peers.unit_count))
# return ops.model.ActiveStatus()
def setup_ganesha(self, event):
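# Leader-only: create the empty 'ganesha-export-index' object that the
# RADOS_URLS block in ganesha.conf reads exports from, then signal peers.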
if not self.model.unit.is_leader():
return
cmd = [
'rados', '-p', self.pool_name,
'-c', '/etc/ceph/ganesha/ceph.conf',
'--id', self.client_name,
'put', 'ganesha-export-index', '/dev/null'
]
try:
subprocess.check_call(cmd)
self.peers.pool_initialised()
except subprocess.CalledProcessError:
logging.error("Failed to setup ganesha index object")
event.defer()
def on_pool_initialised(self, event):
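# Restart Ganesha on this unit once the export index object exists, so it
# can start reading exports via RADOS_URLS.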
try:
subprocess.check_call(['systemctl', 'restart', 'nfs-ganesha'])
except subprocess.CalledProcessError:
logging.error("Failed torestart nfs-ganesha")
event.defer()
@ops_openstack.core.charm_class

src/ganesha.py (new file)

@@ -0,0 +1,153 @@
#!/usr/bin/env python3
# Copyright 2021 OpenStack Charmers
# See LICENSE file for licensing details.
import json
import logging
import subprocess
import tempfile
import uuid
logger = logging.getLogger(__name__)
# TODO: Add ACL with client IPs
# TODO: Add ACL with kerberos
GANESHA_EXPORT_TEMPLATE = """EXPORT {{
# Each EXPORT must have a unique Export_Id.
Export_Id = {id};
# The directory in the exported file system this export
# is rooted on.
Path = '{path}';
# FSAL, Ganesha's module component
FSAL {{
# FSAL name
Name = "Ceph";
User_Id = "{user_id}";
Secret_Access_Key = "{secret_key}";
}}
# Path of export in the NFSv4 pseudo filesystem
Pseudo = '{path}';
SecType = "sys";
CLIENT {{
Access_Type = "rw";
Clients = 0.0.0.0;
}}
# User id squashing, one of None, Root, All
Squash = "None";
}}
"""
class GaneshaNfs(object):
export_index = "ganesha-export-index"
export_counter = "ganesha-export-counter"
def __init__(self, client_name, ceph_pool):
self.client_name = client_name
self.name = str(uuid.uuid4())
self.ceph_pool = ceph_pool
self.access_id = 'ganesha-{}'.format(self.name)
def create_share(self):
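# Flow: create and authorise a CephFS subvolume, allocate the next export id,
# render the EXPORT block, store it as a RADOS object, then ask the running
# Ganesha daemon to load it over D-Bus.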
self.export_path = self._create_cephfs_share()
export_id = self._get_next_export_id()
export_template = GANESHA_EXPORT_TEMPLATE.format(
id=export_id,
path=self.export_path,
user_id=self.access_id,
secret_key=self._ceph_auth_key(),
)
logging.debug("Export template:: \n{}".format(export_template))
tmp_file = self._tmpfile(export_template)
self.rados_put('ganesha-export-{}'.format(export_id), tmp_file.name)
self._ganesha_add_export(self.export_path, tmp_file.name)
def _ganesha_add_export(self, export_path, tmp_path):
return self._dbus_send(
'ExportMgr', 'AddExport',
'string:{}'.format(tmp_path), 'string:EXPORT(Path={})'.format(export_path))
def _dbus_send(self, section, action, *args):
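# Calls the running nfs-ganesha daemon's D-Bus API (org.ganesha.nfsd) so the
# new export takes effect without a service restart.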
cmd = [
'dbus-send', '--print-reply', '--system', '--dest=org.ganesha.nfsd',
'/org/ganesha/nfsd/{}'.format(section),
'org.ganesha.nfsd.exportmgr.{}'.format(action)] + [*args]
logging.debug("About to call: {}".format(cmd))
return subprocess.check_output(cmd)
def _create_cephfs_share(self):
"""Create an authorise a CephFS share.
:returns: export path
:rtype: union[str, bool]
"""
try:
self._ceph_subvolume_command('create', 'ceph-fs', self.name)
except subprocess.CalledProcessError:
logging.error("failed to create subvolume")
return False
try:
self._ceph_subvolume_command(
'authorize', 'ceph-fs', self.name,
'ganesha-{name}'.format(name=self.name))
except subprocess.CalledProcessError:
logging.error("failed to authorize subvolume")
return False
try:
output = self._ceph_subvolume_command('getpath', 'ceph-fs', self.name)
return output.decode('utf-8').strip()
except subprocess.CalledProcessError:
logging.error("failed to get path")
return False
def _ceph_subvolume_command(self, *cmd):
return self._ceph_fs_command('subvolume', *cmd)
def _ceph_fs_command(self, *cmd):
return self._ceph_command('fs', *cmd)
def _ceph_auth_key(self):
output = self._ceph_command(
'auth', 'get', 'client.{}'.format(self.access_id), '--format=json')
return json.loads(output.decode('UTF-8'))[0]['key']
def _ceph_command(self, *cmd):
cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ganesha/ceph.conf"] + [*cmd]
return subprocess.check_output(cmd)
def _get_next_export_id(self):
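# Export ids are tracked in a RADOS counter object: read the current value,
# write back value + 1, and use the value that was read (assumes the counter
# object already exists).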
next_id = int(self.rados_get(self.export_counter))
file = self._tmpfile(next_id + 1)
self.rados_put(self.export_counter, file.name)
return next_id
def _tmpfile(self, value):
file = tempfile.NamedTemporaryFile(mode='w+')
file.write(str(value))
file.seek(0)
return file
def rados_get(self, name):
cmd = [
'rados', '-p', self.ceph_pool, '--id', self.client_name,
'get', name, '/dev/stdout'
]
logging.debug("About to call: {}".format(cmd))
output = subprocess.check_output(cmd)
return output.decode('utf-8')
def rados_put(self, name, source):
cmd = [
'rados', '-p', self.ceph_pool, '--id', self.client_name,
'put', name, source
]
logging.debug("About to call: {}".format(cmd))
subprocess.check_call(cmd)


@@ -0,0 +1,51 @@
#!/usr/bin/env python3
# import json
import logging
# import socket
from ops.framework import (
StoredState,
EventBase,
ObjectEvents,
EventSource,
Object)
class PoolInitialisedEvent(EventBase):
pass
class CephNfsPeerEvents(ObjectEvents):
pool_initialised = EventSource(PoolInitialisedEvent)
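# Peer interface: mirrors a 'pool_initialised' flag through the peer relation's
# application data so every unit, not just the leader that created the index
# object, learns when the pool is ready.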
class CephNfsPeers(Object):
on = CephNfsPeerEvents()
_stored = StoredState()
def __init__(self, charm, relation_name):
super().__init__(charm, relation_name)
self.relation_name = relation_name
self.this_unit = self.framework.model.unit
self._stored.set_default(
pool_initialised=False)
self.framework.observe(
charm.on[relation_name].relation_changed,
self.on_changed)
def on_changed(self, event):
logging.info("CephNfsPeers on_changed")
if self.peer_rel.data[self.peer_rel.app].get('pool_initialised') == 'True' and not self._stored.pool_initialised:
self.on.pool_initialised.emit()
self._stored.pool_initialised = True
def pool_initialised(self):
logging.info("Setting pool initialised")
self.peer_rel.data[self.peer_rel.app]['pool_initialised'] = 'True'
self.on.pool_initialised.emit()
@property
def peer_rel(self):
return self.framework.model.get_relation(self.relation_name)


@@ -1,3 +0,0 @@
[client.ceph-nfs]
key = {{ ceph_client.key }}


@@ -6,10 +6,10 @@
[global]
auth supported = {{ ceph_client.auth_supported }}
mon host = {{ ceph_client.mon_hosts }}
keyring = /etc/ceph/{{ options.application_name }}/$cluster.$name.keyring
keyring = /etc/ceph/ganesha/$cluster.keyring
[client.{{ options.application_name }}]
[client.{{ ceph_nfs.client_name }}]
client mount uid = 0
client mount gid = 0
log file = /var/log/ceph/ceph-client.{{ options.application_name }}.log
{% endif -%}
log file = /var/log/ceph/ceph-client.{{ ceph_nfs.client_name }}.log

templates/ceph.keyring (new file)

@ -0,0 +1,3 @@
[client.{{ ceph_nfs.client_name }}]
key = {{ ceph_client.key }}


@@ -66,112 +66,20 @@ MDCACHE {
Dir_Chunk = 0;
}
EXPORT
{
# Unique export ID number for this export
Export_ID=100;
# We're only interested in NFSv4 in this configuration
Protocols = 4;
# NFSv4 does not allow UDP transport
Transports = TCP;
#
# Path into the cephfs tree.
#
# Note that FSAL_CEPH does not support subtree checking, so there is
# no way to validate that a filehandle presented by a client is
# reachable via an exported subtree.
#
# For that reason, we just export "/" here.
Path = /;
#
# The pseudoroot path. This is where the export will appear in the
# NFS pseudoroot namespace.
#
Pseudo = /cephfs_a/;
# We want to be able to read and write
Access_Type = RW;
# Time out attribute cache entries immediately
Attr_Expiration_Time = 0;
# Enable read delegations? libcephfs v13.0.1 and later allow the
# ceph client to set a delegation. While it's possible to allow RW
# delegations it's not recommended to enable them until ganesha
# acquires CB_GETATTR support.
#
# Note too that delegations may not be safe in clustered
# configurations, so it's probably best to just disable them until
# this problem is resolved:
#
# http://tracker.ceph.com/issues/24802
#
# Delegations = R;
# NFS servers usually decide to "squash" incoming requests from the
# root user to a "nobody" user. It's possible to disable that, but for
# now, we leave it enabled.
# Squash = root;
FSAL {
# FSAL_CEPH export
Name = CEPH;
#
# Ceph filesystems have a name string associated with them, and
# modern versions of libcephfs can mount them based on the
# name. The default is to mount the default filesystem in the
# cluster (usually the first one created).
#
# Filesystem = "cephfs_a";
#
# Ceph clusters have their own authentication scheme (cephx).
# Ganesha acts as a cephfs client. This is the client username
# to use. This user will need to be created before running
# ganesha.
#
# Typically ceph clients have a name like "client.foo". This
# setting should not contain the "client." prefix.
#
# See:
#
# http://docs.ceph.com/docs/jewel/rados/operations/user-management/
#
# The default is to set this to NULL, which means that the
# userid is set to the default in libcephfs (which is
# typically "admin").
#
User_Id = "{{ client_name }}";
#
# Key to use for the session (if any). If not set, it uses the
# normal search path for cephx keyring files to find a key:
#
# Secret_Access_Key = "YOUR SECRET KEY HERE";
}
}
# The below were taken from the Manila docs at
# https://docs.openstack.org/manila/queens/contributor/ganesha.html
# To read exports from RADOS objects
RADOS_URLS {
ceph_conf = "/etc/ceph/ganesha/ceph.conf";
userid = "{{ client_name }}";
userid = "{{ ceph_nfs.client_name }}";
}
%url rados://{{ pool_name }}/ganesha-export-index
%url rados://{{ ceph_nfs.pool_name }}/ganesha-export-index
# To store client recovery data in the same RADOS pool
RADOS_KV {
ceph_conf = "/etc/ceph/ganesha/ceph.conf";
userid = "{{ client_name }}";
pool = {{ pool_name }};
userid = "{{ ceph_nfs.client_name }}";
pool = "{{ ceph_nfs.pool_name }}";
nodeid = "{{ ceph_nfs.hostname }}";
}
# Config block for FSAL_CEPH


@@ -20,8 +20,14 @@ applications:
options:
monitor-count: '3'
expected-osd-count: 6
ceph-fs:
charm: cs:~openstack-charmers-next/ceph-fs
num_units: 1
relations:
- - 'ceph-mon:client'
- 'ceph-nfs:ceph-client'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-fs'
- 'ceph-mon'


@@ -9,4 +9,4 @@ tests: []
target_deploy_status:
ubuntu:
workload-status: active
workload-status-message: ''
workload-status-message-prefix: ''