Add in Ceph client handling, fix up lint and tests
parent 3807908724
commit b8e7c454ae

actions.yaml (14 changed lines)
@@ -7,10 +7,10 @@
#
# Learn more about actions at: https://juju.is/docs/sdk/actions

fortune:
  description: Returns a pithy phrase.
  params:
    fail:
      description: "Fail with this message"
      type: string
      default: ""
# fortune:
#   description: Returns a pithy phrase.
#   params:
#     fail:
#       description: "Fail with this message"
#       type: string
#       default: ""
charmcraft.yaml (10 lines removed)
@@ -1,10 +0,0 @@
# Learn more about charmcraft.yaml configuration at:
# https://juju.is/docs/sdk/charmcraft-config
type: "charm"
bases:
  - build-on:
      - name: "ubuntu"
        channel: "20.04"
    run-on:
      - name: "ubuntu"
        channel: "20.04"
config.yaml (31 changed lines)
@@ -12,3 +12,34 @@ options:
    default: 🎁
    description: A thing used by the charm.
    type: string
  ceph-osd-replication-count:
    type: int
    default: 3
    description: |
      This value dictates the number of replicas ceph must make of any
      object it stores within the images rbd pool. Of course, this only
      applies if using Ceph as a backend store. Note that once the images
      rbd pool has been created, changing this value will not have any
      effect (although it can be changed in ceph by manually configuring
      your ceph cluster).
  ceph-pool-weight:
    type: int
    default: 5
    description: |
      Defines a relative weighting of the pool as a percentage of the total
      amount of data in the Ceph cluster. This effectively weights the number
      of placement groups for the pool created to be appropriately portioned
      to the amount of data expected. For example, if the compute images
      for the OpenStack compute instances are expected to take up 20% of the
      overall configuration then this value would be specified as 20. Note -
      it is important to choose an appropriate value for the pool weight as
      this directly affects the number of placement groups which will be
      created for the pool. The number of placement groups for a pool can
      only be increased, never decreased - so it is important to identify the
      percent of data that will likely reside in the pool.
  rbd-pool-name:
    default:
    type: string
    description: |
      Optionally specify an existing pool that shares should map to. Defaults
      to the application's name.
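Note on ceph-pool-weight: the weight is only a sizing hint that the Ceph broker
uses to pick a placement-group count proportional to the data the pool is
expected to hold. A rough, illustrative sketch of the pgcalc-style arithmetic
(not the charm's or the broker's exact code; the OSD count and the target of
100 PGs per OSD are assumptions):

    def estimate_pg_num(osd_count, weight_percent, replicas,
                        target_pgs_per_osd=100):
        # Share of the cluster-wide PG budget given to this pool.
        raw = (osd_count * target_pgs_per_osd * weight_percent / 100.0) / replicas
        # Round up to the next power of two, the usual pg_num convention.
        pg_num = 1
        while pg_num < raw:
            pg_num *= 2
        return pg_num

    # e.g. 6 OSDs (as in the test bundle below), weight 20%, 3 replicas -> 64 PGs
    print(estimate_pg_num(6, 20, 3))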
metadata.yaml
@@ -1,23 +1,22 @@
# Copyright 2021 OpenStack Charmers
# See LICENSE file for licensing details.

# For a complete list of supported options, see:
# https://discourse.charmhub.io/t/charm-metadata-v2/3674/15
name: ceph-nfs
display-name: |
  TEMPLATE-TODO: fill out a display name for the Charmcraft store
summary: Gateway for provisioning NFS shares backed by ceph.
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
  TEMPLATE-TODO: fill out the charm's description
summary: |
  TEMPLATE-TODO: fill out the charm's summary

# TEMPLATE-TODO: replace with containers for your workload (delete for non-k8s)
containers:
  httpbin:
    resource: httpbin-image

# TEMPLATE-TODO: each container defined above must specify an oci-image resource
resources:
  httpbin-image:
    type: oci-image
    description: OCI image for httpbin (kennethreitz/httpbin)
  The NFS gateway is provided by NFS-Ganesha and provides NFS shares
  that are backed by CephFS.
tags:
  - openstack
  - storage
  - misc
series:
  - focal
  - groovy
  - hirsute
  - impish
subordinate: false
min-juju-version: 2.7.6
extra-bindings:
  public:
requires:
  ceph-client:
    interface: ceph-client
src/charm.py (241 changed lines)
@@ -13,11 +13,18 @@ develop a new k8s charm using the Operator Framework:
"""

import logging
import os
from pathlib import Path
import subprocess

from ops.charm import CharmBase
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus
# from ops.model import ActiveStatus

import charmhelpers.core.host as ch_host
import charmhelpers.core.templating as ch_templating
import interface_ceph_client.ceph_client as ceph_client

import ops_openstack.adapters
import ops_openstack.core
@@ -58,83 +65,183 @@ class CephClientAdapter(ops_openstack.adapters.OpenStackOperRelationAdapter):
        return self.relation.get_relation_data()['key']


class CephNFSAdapters(
        ops_openstack.adapters.OpenStackRelationAdapters):
    """Collection of relation adapters."""

    relation_adapters = {
        'ceph-client': CephClientAdapter,
    }


class CephNfsCharm(CharmBase):
    """Charm the service."""
    """Ceph NFS Base Charm."""

    _stored = StoredState()
    PACKAGES = ['nfs-ganesha', 'ceph-common']

    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on.httpbin_pebble_ready, self._on_httpbin_pebble_ready)
        self.framework.observe(self.on.config_changed, self._on_config_changed)
        self.framework.observe(self.on.fortune_action, self._on_fortune_action)
        self._stored.set_default(things=[])
    CEPH_CAPABILITIES = [
        "mds", "allow *",
        "osd", "allow rw",
        "mon", "allow r, "
               "allow command \"auth del\", "
               "allow command \"auth caps\", "
               "allow command \"auth get\", "
               "allow command \"auth get-or-create\""]

    def _on_httpbin_pebble_ready(self, event):
        """Define and start a workload using the Pebble API.
    REQUIRED_RELATIONS = ['ceph-client', 'cluster']

        TEMPLATE-TODO: change this example to suit your needs.
        You'll need to specify the right entrypoint and environment
        configuration for your specific workload. Tip: you can see the
        standard entrypoint of an existing container using docker inspect
    CEPH_CONFIG_PATH = Path('/etc/ceph')
    GANESHA_CONFIG_PATH = Path('/etc/ganesha')

        Learn more about Pebble layers at https://github.com/canonical/pebble
    CEPH_GANESHA_CONFIG_PATH = CEPH_CONFIG_PATH / 'ganesha'
    CEPH_CONF = CEPH_GANESHA_CONFIG_PATH / 'ceph.conf'
    GANESHA_KEYRING = CEPH_GANESHA_CONFIG_PATH / 'ceph.client.ceph-ganesha.keyring'
    GANESHA_CONF = GANESHA_CONFIG_PATH / 'ganesha.conf'

    SERVICES = ['nfs-ganesha']

    RESTART_MAP = {
        str(GANESHA_CONF): SERVICES,
        str(CEPH_CONF): SERVICES,
        str(GANESHA_KEYRING): SERVICES}

    release = 'default'

    def __init__(self, framework):
        super().__init__(framework)
        # super().register_status_check(self.custom_status_check)
        logging.info("Using %s class", self.release)
        self._stored.set_default(
            is_started=False,
        )
        self.ceph_client = ceph_client.CephClientRequires(
            self,
            'ceph-client')
        self.adapters = CephNFSAdapters(
            (self.ceph_client,),
            self)
        self.framework.observe(
            self.ceph_client.on.broker_available,
            self.request_ceph_pool)
        self.framework.observe(
            self.ceph_client.on.pools_available,
            self.render_config)
        self.framework.observe(
            self.on.config_changed,
            self.request_ceph_pool)
        self.framework.observe(
            self.on.upgrade_charm,
            self.render_config)

    def config_get(self, key):
        """Retrieve config option.

        :returns: Value of the corresponding config option or None.
        :rtype: Any
        """
        # Get a reference the container attribute on the PebbleReadyEvent
        container = event.workload
        # Define an initial Pebble layer configuration
        pebble_layer = {
            "summary": "httpbin layer",
            "description": "pebble config layer for httpbin",
            "services": {
                "httpbin": {
                    "override": "replace",
                    "summary": "httpbin",
                    "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent",
                    "startup": "enabled",
                    "environment": {"thing": self.model.config["thing"]},
                }
            },
        }
        # Add intial Pebble config layer using the Pebble API
        container.add_layer("httpbin", pebble_layer, combine=True)
        # Autostart any services that were defined with startup: enabled
        container.autostart()
        # Learn more about statuses in the SDK docs:
        # https://juju.is/docs/sdk/constructs#heading--statuses
        self.unit.status = ActiveStatus()
        return self.model.config.get(key)

    def _on_config_changed(self, _):
        """Just an example to show how to deal with changed configuration.
    @property
    def pool_name(self):
        """The name of the default rbd data pool to be used for shares.

        TEMPLATE-TODO: change this example to suit your needs.
        If you don't need to handle config, you can remove this method,
        the hook created in __init__.py for it, the corresponding test,
        and the config.py file.

        Learn more about config at https://juju.is/docs/sdk/config
        :returns: Data pool name.
        :rtype: str
        """
        current = self.config["thing"]
        if current not in self._stored.things:
            logger.debug("found a new thing: %r", current)
            self._stored.things.append(current)

    def _on_fortune_action(self, event):
        """Just an example to show how to receive actions.

        TEMPLATE-TODO: change this example to suit your needs.
        If you don't need to handle actions, you can remove this method,
        the hook created in __init__.py for it, the corresponding test,
        and the actions.py file.

        Learn more about actions at https://juju.is/docs/sdk/actions
        """
        fail = event.params["fail"]
        if fail:
            event.fail(fail)
        if self.config_get('rbd-pool-name'):
            pool_name = self.config_get('rbd-pool-name')
        else:
            event.set_results({"fortune": "A bug in the code is worth two in the documentation."})
            pool_name = self.app.name
        return pool_name

    @property
    def client_name(self):
        return self.app.name

    def request_ceph_pool(self, event):
        """Request pools from Ceph cluster."""
        if not self.ceph_client.broker_available:
            logging.info("Cannot request ceph setup at this time")
            return
        try:
            bcomp_kwargs = self.get_bluestore_compression()
        except ValueError as e:
            # The end user has most likely provided an invalid value for
            # a configuration option. Just log the traceback here, the
            # end user will be notified by assess_status() called at
            # the end of the hook execution.
            logging.warn('Caught ValueError, invalid value provided for '
                         'configuration?: "{}"'.format(str(e)))
            return
        weight = self.config_get('ceph-pool-weight')
        replicas = self.config_get('ceph-osd-replication-count')

        logging.info("Requesting replicated pool")
        self.ceph_client.create_replicated_pool(
            name=self.pool_name,
            replicas=replicas,
            weight=weight,
            **bcomp_kwargs)
        logging.info("Requesting permissions")
        self.ceph_client.request_ceph_permissions(
            self.client_name,
            self.CEPH_CAPABILITIES)

    def refresh_request(self, event):
        """Re-request Ceph pools and render config."""
        self.render_config(event)
        self.request_ceph_pool(event)

    def render_config(self, event):
        """Render config and restart services if config files change."""
        if not self.ceph_client.pools_available:
            logging.info("Deferring setup")
            event.defer()
            return

        self.CEPH_GANESHA_CONFIG_PATH.mkdir(
            exist_ok=True,
            mode=0o750)

        def daemon_reload_and_restart(service_name):
            subprocess.check_call(['systemctl', 'daemon-reload'])
            subprocess.check_call(['systemctl', 'restart', service_name])

        rfuncs = {}

        @ch_host.restart_on_change(self.RESTART_MAP, restart_functions=rfuncs)
        def _render_configs():
            for config_file in self.RESTART_MAP.keys():
                ch_templating.render(
                    os.path.basename(config_file),
                    config_file,
                    self.adapters)
        logging.info("Rendering config")
        _render_configs()
        logging.info("Setting started state")
        self._stored.is_started = True
        self.update_status()
        logging.info("on_pools_available: status updated")

    # def custom_status_check(self):
    #     """Custom update status checks."""
    #     if ch_host.is_container():
    #         return ops.model.BlockedStatus(
    #             'Charm cannot be deployed into a container')
    #     if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS:
    #         return ops.model.BlockedStatus(
    #             '{} is an invalid unit count'.format(self.peers.unit_count))
    #     return ops.model.ActiveStatus()


if __name__ == "__main__":
    main(CephNfsCharm)
@ops_openstack.core.charm_class
class CephNFSCharmOcto(CephNfsCharm):
    """Ceph NFS Charm for Octopus."""

    _stored = StoredState()
    release = 'octopus'


if __name__ == '__main__':
    main(ops_openstack.core.get_charm_class_for_release())
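Note on CEPH_CAPABILITIES above: it is a flat list of (entity, capability)
pairs that request_ceph_permissions() passes to the ceph-client interface
(the adjacent string literals in the "mon" entry concatenate into one
capability string). A small sketch of how the pairing reads, for
illustration only:

    caps = [
        "mds", "allow *",
        "osd", "allow rw",
        "mon", 'allow r, allow command "auth del", allow command "auth caps", '
               'allow command "auth get", allow command "auth get-or-create"',
    ]
    # Pair every entity with its capability string.
    print(dict(zip(caps[0::2], caps[1::2])))
    # {'mds': 'allow *', 'osd': 'allow rw', 'mon': 'allow r, allow command ...'}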
templates/ceph.conf
@@ -6,10 +6,10 @@
[global]
auth supported = {{ ceph_client.auth_supported }}
mon host = {{ ceph_client.mon_hosts }}
keyring = /etc/ceph/nfs/$cluster.$name.keyring
keyring = /etc/ceph/{{ options.application_name }}/$cluster.$name.keyring

[client.ceph-nfs]
[client.{{ options.application_name }}]
client mount uid = 0
client mount gid = 0
log file = /var/log/ceph/ceph-client.nfs.log

log file = /var/log/ceph/ceph-client.{{ options.application_name }}.log
{% endif -%}
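Note: charmhelpers' templating is Jinja2-based, so the hard-coded "nfs" paths
above become application-relative once rendered. A minimal sketch of the
substitution (assuming, as the template implies, that the relation adapters
expose the application name as options.application_name and the application
is named ceph-nfs):

    from jinja2 import Template

    line = ("keyring = /etc/ceph/{{ options.application_name }}"
            "/$cluster.$name.keyring")
    print(Template(line).render(options={"application_name": "ceph-nfs"}))
    # keyring = /etc/ceph/ceph-nfs/$cluster.$name.keyring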
templates/ganesha.conf (new file, 186 lines)
@@ -0,0 +1,186 @@
# The following is copied from the Ganesha source examples:
# https://github.com/nfs-ganesha/nfs-ganesha/blob/576e3bafccb6da5c7ea18d7099013f7494ce8d2c/src/config_samples/ceph.conf
#
# It is possible to use FSAL_CEPH to provide an NFS gateway to CephFS. The
# following sample config should be useful as a starting point for
# configuration. This basic configuration is suitable for a standalone NFS
# server, or an active/passive configuration managed by some sort of clustering
# software (e.g. pacemaker, docker, etc.).
#
# Note too that it is also possible to put a config file in RADOS, and give
# ganesha a rados URL from which to fetch it. For instance, if the config
# file is stored in a RADOS pool called "nfs-ganesha", in a namespace called
# "ganesha-namespace" with an object name of "ganesha-config":
#
# %url rados://nfs-ganesha/ganesha-namespace/ganesha-config
#
# If we only export cephfs (or RGW), store the configs and recovery data in
# RADOS, and mandate NFSv4.1+ for access, we can avoid any sort of local
# storage, and ganesha can run as an unprivileged user (even inside a
# locked-down container).
#

NFS_CORE_PARAM
{
    # Ganesha can lift the NFS grace period early if NLM is disabled.
    Enable_NLM = false;

    # rquotad doesn't add any value here. CephFS doesn't support per-uid
    # quotas anyway.
    Enable_RQUOTA = false;

    # In this configuration, we're just exporting NFSv4. In practice, it's
    # best to use NFSv4.1+ to get the benefit of sessions.
    Protocols = 4;
}

NFSv4
{
    # Modern versions of libcephfs have delegation support, though they
    # are not currently recommended in clustered configurations. They are
    # disabled by default but can be reenabled for singleton or
    # active/passive configurations.
    # Delegations = false;

    # One can use any recovery backend with this configuration, but being
    # able to store it in RADOS is a nice feature that makes it easy to
    # migrate the daemon to another host.
    #
    # For a single-node or active/passive configuration, rados_ng driver
    # is preferred. For active/active clustered configurations, the
    # rados_cluster backend can be used instead. See the
    # ganesha-rados-grace manpage for more information.
    RecoveryBackend = rados_cluster;

    # NFSv4.0 clients do not send a RECLAIM_COMPLETE, so we end up having
    # to wait out the entire grace period if there are any. Avoid them.
    Minor_Versions = 1,2;
}

# The libcephfs client will aggressively cache information while it
# can, so there is little benefit to ganesha actively caching the same
# objects. Doing so can also hurt cache coherency. Here, we disable
# as much attribute and directory caching as we can.
MDCACHE {
    # Size the dirent cache down as small as possible.
    Dir_Chunk = 0;
}

EXPORT
{
    # Unique export ID number for this export
    Export_ID=100;

    # We're only interested in NFSv4 in this configuration
    Protocols = 4;

    # NFSv4 does not allow UDP transport
    Transports = TCP;

    #
    # Path into the cephfs tree.
    #
    # Note that FSAL_CEPH does not support subtree checking, so there is
    # no way to validate that a filehandle presented by a client is
    # reachable via an exported subtree.
    #
    # For that reason, we just export "/" here.
    Path = /;

    #
    # The pseudoroot path. This is where the export will appear in the
    # NFS pseudoroot namespace.
    #
    Pseudo = /cephfs_a/;

    # We want to be able to read and write
    Access_Type = RW;

    # Time out attribute cache entries immediately
    Attr_Expiration_Time = 0;

    # Enable read delegations? libcephfs v13.0.1 and later allow the
    # ceph client to set a delegation. While it's possible to allow RW
    # delegations it's not recommended to enable them until ganesha
    # acquires CB_GETATTR support.
    #
    # Note too that delegations may not be safe in clustered
    # configurations, so it's probably best to just disable them until
    # this problem is resolved:
    #
    # http://tracker.ceph.com/issues/24802
    #
    # Delegations = R;

    # NFS servers usually decide to "squash" incoming requests from the
    # root user to a "nobody" user. It's possible to disable that, but for
    # now, we leave it enabled.
    # Squash = root;

    FSAL {
        # FSAL_CEPH export
        Name = CEPH;

        #
        # Ceph filesystems have a name string associated with them, and
        # modern versions of libcephfs can mount them based on the
        # name. The default is to mount the default filesystem in the
        # cluster (usually the first one created).
        #
        # Filesystem = "cephfs_a";

        #
        # Ceph clusters have their own authentication scheme (cephx).
        # Ganesha acts as a cephfs client. This is the client username
        # to use. This user will need to be created before running
        # ganesha.
        #
        # Typically ceph clients have a name like "client.foo". This
        # setting should not contain the "client." prefix.
        #
        # See:
        #
        # http://docs.ceph.com/docs/jewel/rados/operations/user-management/
        #
        # The default is to set this to NULL, which means that the
        # userid is set to the default in libcephfs (which is
        # typically "admin").
        #
        User_Id = "{{ client_name }}";

        #
        # Key to use for the session (if any). If not set, it uses the
        # normal search path for cephx keyring files to find a key:
        #
        # Secret_Access_Key = "YOUR SECRET KEY HERE";
    }
}

# The below were taken from the Manila docs at
# https://docs.openstack.org/manila/queens/contributor/ganesha.html

# To read exports from RADOS objects
RADOS_URLS {
    ceph_conf = "/etc/ceph/ganesha/ceph.conf";
    userid = "{{ client_name }}";
}

%url rados://{{ pool_name }}/ganesha-export-index
# To store client recovery data in the same RADOS pool

RADOS_KV {
    ceph_conf = "/etc/ceph/ganesha/ceph.conf";
    userid = "{{ client_name }}";
    pool = {{ pool_name }};
}

# Config block for FSAL_CEPH
CEPH
{
    # Path to a ceph.conf file for this ceph cluster.
    # Ceph_Conf = /etc/ceph/ceph.conf;

    # User file-creation mask. These bits will be masked off from the unix
    # permissions on newly-created inodes.
    # umask = 0;
}
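Note: the "%url rados://{{ pool_name }}/ganesha-export-index" directive above
expects an export-index object to exist in the pool before ganesha starts. A
sketch of bootstrapping an empty index with the python-rados bindings (the
object and pool names follow the template; the conffile path and client name
mirror the values rendered above, but this is illustrative, not part of the
charm):

    import rados

    # Connect as the ganesha client using the rendered ceph.conf.
    cluster = rados.Rados(conffile='/etc/ceph/ganesha/ceph.conf',
                          name='client.ceph-nfs')
    cluster.connect()
    try:
        ioctx = cluster.open_ioctx('ceph-nfs')  # the charm's data pool
        ioctx.write_full('ganesha-export-index', b'')  # empty export index
        ioctx.close()
    finally:
        cluster.shutdown()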
@@ -19,7 +19,7 @@ applications:
    num_units: 3
    options:
      monitor-count: '3'
      excpected-osd-count: 6
      expected-osd-count: 6
relations:
- - 'ceph-mon:client'
  - 'ceph-nfs:ceph-client'
unit_tests/test_ceph_nfs_charm.py (new file, 67 lines)
@@ -0,0 +1,67 @@
# Copyright 2021 OpenStack Charmers
# See LICENSE file for licensing details.
#
# Learn more about testing at: https://juju.is/docs/sdk/testing


import unittest
import sys

sys.path.append('lib')  # noqa
sys.path.append('src')  # noqa

from unittest.mock import patch, Mock

from charm import CephNfsCharm
# from ops.model import ActiveStatus
from ops.testing import Harness

with patch('charmhelpers.core.host_factory.ubuntu.cmp_pkgrevno',
           Mock(return_value=1)):
    import charm


class CharmTestCase(unittest.TestCase):

    def setUp(self, obj, patches):
        super().setUp()
        self.patches = patches
        self.obj = obj
        self.patch_all()

    def patch(self, method):
        _m = patch.object(self.obj, method)
        mock = _m.start()
        self.addCleanup(_m.stop)
        return mock

    def patch_all(self):
        for method in self.patches:
            setattr(self, method, self.patch(method))


class _CephNfsCharm(CephNfsCharm):

    @staticmethod
    def get_bluestore_compression():
        return {}


class TestCephNFSCharmBase(CharmTestCase):

    PATCHES = [
        'ch_templating',
        'os',
        'subprocess',
    ]

    def setUp(self):
        super().setUp(charm, self.PATCHES)
        self.harness = Harness(
            _CephNfsCharm,
        )
        self.addCleanup(self.harness.cleanup)

    def test_init(self):
        self.harness.begin()
        self.assertFalse(self.harness.charm._stored.is_started)
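Note: with the Harness wiring above, further behaviour can be tested without a
real Ceph relation. For example, a hypothetical test of the rbd-pool-name
fallback added in src/charm.py (assuming the Harness picks up the charm's own
metadata.yaml and config.yaml, so update_config() accepts the option):

    def test_pool_name_defaults_to_app_name(self):
        self.harness.begin()
        # No rbd-pool-name set: fall back to the application name.
        self.assertEqual(self.harness.charm.pool_name,
                         self.harness.charm.app.name)
        # An explicit rbd-pool-name wins.
        self.harness.update_config({'rbd-pool-name': 'shared-pool'})
        self.assertEqual(self.harness.charm.pool_name, 'shared-pool')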
@@ -1,72 +0,0 @@
# Copyright 2021 OpenStack Charmers
# See LICENSE file for licensing details.
#
# Learn more about testing at: https://juju.is/docs/sdk/testing


import unittest
import sys

sys.path.append('lib')  # noqa
sys.path.append('src')  # noqa

from unittest.mock import Mock

from charm import CephNfsCharm
from ops.model import ActiveStatus
from ops.testing import Harness


class TestCharm(unittest.TestCase):
    def setUp(self):
        self.harness = Harness(CephNfsCharm)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin()

    def test_config_changed(self):
        self.assertEqual(list(self.harness.charm._stored.things), [])
        self.harness.update_config({"thing": "foo"})
        self.assertEqual(list(self.harness.charm._stored.things), ["foo"])

    def test_action(self):
        # the harness doesn't (yet!) help much with actions themselves
        action_event = Mock(params={"fail": ""})
        self.harness.charm._on_fortune_action(action_event)

        self.assertTrue(action_event.set_results.called)

    def test_action_fail(self):
        action_event = Mock(params={"fail": "fail this"})
        self.harness.charm._on_fortune_action(action_event)

        self.assertEqual(action_event.fail.call_args, [("fail this",)])

    def test_httpbin_pebble_ready(self):
        # Check the initial Pebble plan is empty
        initial_plan = self.harness.get_container_pebble_plan("httpbin")
        self.assertEqual(initial_plan.to_yaml(), "{}\n")
        # Expected plan after Pebble ready with default config
        expected_plan = {
            "services": {
                "httpbin": {
                    "override": "replace",
                    "summary": "httpbin",
                    "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent",
                    "startup": "enabled",
                    "environment": {"thing": "🎁"},
                }
            },
        }
        # Get the httpbin container from the model
        container = self.harness.model.unit.get_container("httpbin")
        # Emit the PebbleReadyEvent carrying the httpbin container
        self.harness.charm.on.httpbin_pebble_ready.emit(container)
        # Get the plan now we've run PebbleReady
        updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict()
        # Check we've got the plan we expected
        self.assertEqual(expected_plan, updated_plan)
        # Check the service was started
        service = self.harness.model.unit.get_container("httpbin").get_service("httpbin")
        self.assertTrue(service.is_running())
        # Ensure we set an ActiveStatus with no message
        self.assertEqual(self.harness.model.unit.status, ActiveStatus())