ceph: remove unused ceph jewel components
* python-cephclient: remove ceph jewel client * ceph: remove unused ceph-rest-api service ceph-rest-api was removed in Ceph Mimic * ceph: remove unused osd-wait-status script Story: 2003605 Task: 28861 Depends-On: Ibfbecf0a8beb38009b9d7192ca9455a841402040 Change-Id: Ia79c2f03054588fe5057107d9b2856ee2e821881 Co-Authored-By: Daniel Badea <daniel.badea@windriver.com> Signed-off-by: Changcheng Liu <changcheng.liu@intel.com> Signed-off-by: Daniel Badea <daniel.badea@windriver.com>
This commit is contained in:
parent
e263b4c484
commit
6364ad7ec5
@ -1,92 +0,0 @@
|
||||
#!/bin/sh

### BEGIN INIT INFO
# Provides: ceph-rest-api
# Required-Start: $ceph
# Required-Stop: $ceph
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Ceph REST API daemon
# Description: Ceph REST API daemon
### END INIT INFO

DESC="ceph-rest-api"
DAEMON="/usr/bin/ceph-rest-api"
RUNDIR="/var/run/ceph"
PIDFILE="${RUNDIR}/ceph-rest-api.pid"

# Start the daemon unless an instance is already running.
# A PID file whose process directory is gone from /proc is treated
# as stale and removed before starting.
start()
{
    if [ -e $PIDFILE ]; then
        PIDDIR=/proc/$(cat $PIDFILE)
        if [ -d ${PIDDIR} ]; then
            echo "$DESC already running."
            exit 0
        else
            echo "Removing stale PID file $PIDFILE"
            rm -f $PIDFILE
        fi
    fi

    echo -n "Starting $DESC..."
    mkdir -p $RUNDIR
    start-stop-daemon --start --quiet --background \
        --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON}

    if [ $? -eq 0 ]; then
        echo "done."
    else
        echo "failed."
        exit 1
    fi
}

# Stop the daemon; the PID file is removed unconditionally.
stop()
{
    echo -n "Stopping $DESC..."
    start-stop-daemon --stop --quiet --pidfile $PIDFILE
    if [ $? -eq 0 ]; then
        echo "done."
    else
        echo "failed."
    fi
    rm -f $PIDFILE
}

# LSB-style status: exit 0 = running, 1 = dead but PID file exists,
# 3 = not running.
status()
{
    pid=`cat $PIDFILE 2>/dev/null`
    if [ -n "$pid" ]; then
        # BUG FIX: "&>/dev/null" is a bashism; under /bin/sh it parses
        # as "ps -p $pid &" followed by ">/dev/null", so the condition
        # was the always-successful redirection and status always
        # reported the daemon as running. Use POSIX redirection.
        if ps -p "$pid" >/dev/null 2>&1 ; then
            echo "$DESC is running"
            exit 0
        else
            echo "$DESC is not running but has pid file"
            exit 1
        fi
    fi
    echo "$DESC is not running"
    exit 3
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart|force-reload|reload)
        stop
        start
        ;;
    status)
        status
        ;;
    *)
        echo "Usage: $0 {start|stop|force-reload|restart|reload|status}"
        exit 1
        ;;
esac

exit 0
|
@ -1,16 +0,0 @@
|
||||
# Systemd wrapper that delegates to the SysV init script for the
# Ceph REST API daemon.
[Unit]
Description=Ceph REST API
# Needs networking and the main ceph services up first.
After=network.target ceph.target

[Service]
# The init script backgrounds the daemon itself, hence Type=forking.
Type=forking
Restart=no
KillMode=process
RemainAfterExit=yes
ExecStart=/etc/rc.d/init.d/ceph-rest-api start
ExecStop=/etc/rc.d/init.d/ceph-rest-api stop
ExecReload=/etc/rc.d/init.d/ceph-rest-api reload

[Install]
WantedBy=multi-user.target
|
||||
|
@ -1,246 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
#
|
||||
# Wait for one or a group of OSDs to match one or a group of statuses
|
||||
# as reported by "ceph osd tree".
|
||||
#
|
||||
# Examples:
|
||||
# - wait for osd 0 to be up:
|
||||
# osd-wait-status -o 0 -s up
|
||||
#
|
||||
# - wait for osd 0 and osd 1 to be up:
|
||||
# osd-wait-status -o 0 1 -s up
|
||||
#
|
||||
# The amount of time spent waiting for OSDs to match a status can
|
||||
# be limited by specifying:
|
||||
#
|
||||
# - the maximum retry count; the script will fail if the status doesn't
|
||||
# match the desired one after more than retry count attempts.
|
||||
# The interval between attempts is controlled by the "-i" flag.
|
||||
# Example:
|
||||
# osd-wait-status -o 0 -s up -c 2 -i 3
|
||||
# will call "ceph osd tree" once to get the status of osd 0 and if
|
||||
# it's not "up" then it will try one more time after 3 seconds.
|
||||
#
|
||||
# - a deadline as the maximum interval of time the script is looping
|
||||
# waiting for OSDs to match status. The interval between attempts
|
||||
# is controlled by the "-i" flag.
|
||||
# Example:
|
||||
# osd-wait-status -o 0 -s up -d 10 -i 3
|
||||
# will call "ceph osd tree" until either osd 0 status is "up" or
|
||||
# no more than 10 seconds have passed, that's 3-4 attempts depending
|
||||
# on how much time it takes to run "ceph osd tree"
|
||||
#
|
||||
# Status match can be reversed by using "-n" flag.
|
||||
# Example:
|
||||
# osd-wait-status -o 0 -n -s up
|
||||
# waits until osd 0 status is NOT up.
|
||||
#
|
||||
# osd-wait-status does not allow matching arbitrary combinations of
|
||||
# OSDs and statuses. For example: "osd 0 up and osd 1 down" is not
|
||||
# supported.
|
||||
#
|
||||
# Return code is 0 if OSDs match expected status before the
|
||||
# retry count*interval / deadline limits are reached.
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import retrying
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
LOG = logging.getLogger('osd-wait-status')
|
||||
|
||||
CEPH_BINARY_PATH = '/usr/bin/ceph'
|
||||
RETRY_INTERVAL_SEC = 1
|
||||
RETRY_FOREVER = 0
|
||||
NO_DEADLINE = 0
|
||||
|
||||
|
||||
class OsdException(Exception):
    """Error raised when OSD status cannot be retrieved or interpreted."""

    def __init__(self, message, restartable=False):
        # restartable=True marks transient failures (e.g. a failed
        # "ceph osd tree" invocation) that the retry loop may retry;
        # see retry_if_restartable in osd_wait_status.
        super(OsdException, self).__init__(message)
        self.restartable = restartable
|
||||
|
||||
|
||||
def get_osd_tree():
    """Run "ceph osd tree --format json" and return the parsed JSON.

    Returns:
        dict: the decoded JSON output of "ceph osd tree".

    Raises:
        OsdException: if the command cannot be executed, exits non-zero
            (marked restartable=True so the retry loop may try again),
            or produces output that is not valid JSON.
    """
    command = [CEPH_BINARY_PATH,
               'osd', 'tree', '--format', 'json']
    try:
        p = subprocess.Popen(command,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        output, error = p.communicate()
        if p.returncode != 0:
            # BUG FIX: the format string had only three placeholders for
            # four arguments, silently dropping the captured stderr from
            # the diagnostic; include it explicitly.
            raise OsdException(
                ('Command failed: command="{}", '
                 'returncode={}, output="{}", stderr="{}"').format(
                    ' '.join(command),
                    p.returncode,
                    output, error),
                restartable=True)
    except OSError as e:
        # The binary itself could not be started; not restartable.
        raise OsdException(
            ('Command failed: command="{}", '
             'reason="{}"').format(command, str(e)))
    try:
        return json.loads(output)
    except ValueError as e:
        raise OsdException(
            ('JSON decode failed: '
             'data="{}", error="{}"').format(
                output, e))
|
||||
|
||||
|
||||
def osd_match_status(target_osd, target_status,
                     reverse_logic):
    """Check whether every OSD in target_osd currently matches (or,
    when reverse_logic is set, does not match) one of the statuses in
    target_status, according to "ceph osd tree".

    Returns True when all OSDs satisfy the condition, False otherwise.
    Raises OsdException when the status of any requested OSD cannot be
    found in the tree output.
    """
    LOG.info(('Match status: '
              'target_osd={}, '
              'target status={}, '
              'reverse_logic={}').format(
        target_osd, target_status, reverse_logic))
    # Collect the reported status of each requested OSD; stop scanning
    # once every requested OSD has been seen.
    osd_status = {}
    for entry in get_osd_tree().get('nodes'):
        entry_name = entry.get('name')
        if entry_name not in target_osd:
            continue
        osd_status[entry_name] = entry.get('status')
        if len(osd_status) == len(target_osd):
            break
    LOG.info('Current OSD(s) status: {}'.format(osd_status))
    # Drop every OSD that already satisfies the condition; whatever
    # remains is still pending.
    for wanted in target_osd:
        if wanted not in osd_status:
            raise OsdException(
                ('Unable to retrieve status '
                 'for "{}"').format(
                    wanted))
        matched = osd_status[wanted] in target_status
        # Delete when matched (normal) or not matched (reversed).
        if matched != reverse_logic:
            del osd_status[wanted]
    if not osd_status:
        LOG.info('OSD(s) status target reached.')
        return True
    LOG.info('OSD(s) {}matching status {}: {}'.format(
        '' if reverse_logic else 'not ',
        target_status,
        osd_status.keys()))
    return False
|
||||
|
||||
|
||||
def osd_wait_status(target_osd, target_status,
                    reverse_logic,
                    retry_count, retry_interval,
                    deadline):
    """Block until the OSDs in target_osd match (or, with reverse_logic,
    stop matching) target_status, bounded by retry_count attempts and/or
    a wall-clock deadline (seconds). Returns immediately when target_osd
    is empty; raises retrying.RetryError when the limits are exhausted.
    """

    def retry_if_false(result):
        # Keep retrying while osd_match_status reports no match yet.
        return result is False

    def retry_if_restartable(exception):
        # Only transient (restartable) OsdExceptions restart the loop.
        return (isinstance(exception, OsdException)
                and exception.restartable)

    LOG.info(('Wait options: '
              'target_osd={}, '
              'target_status={}, '
              'reverse_logic={}, '
              'retry_count={}, '
              'retry_interval={}, '
              'deadline={}').format(
        target_osd, target_status, reverse_logic,
        retry_count, retry_interval, deadline))
    kwargs = {
        'retry_on_result': retry_if_false,
        'retry_on_exception': retry_if_restartable}
    if retry_count != RETRY_FOREVER:
        kwargs['stop_max_attempt_number'] = retry_count
    if deadline != NO_DEADLINE:
        # retrying expects milliseconds.
        kwargs['stop_max_delay'] = deadline * 1000
    if retry_interval != 0:
        kwargs['wait_fixed'] = retry_interval * 1000
    if not target_osd:
        # Nothing to wait for.
        return
    retrying.Retrying(**kwargs).call(
        osd_match_status,
        target_osd, target_status,
        reverse_logic)
|
||||
|
||||
|
||||
def non_negative_interger(value):
    """argparse type validator: parse value as a non-negative integer.

    NOTE(review): the misspelled name is kept for compatibility with the
    existing parser.add_argument(type=...) references.

    Raises:
        argparse.ArgumentTypeError: if the parsed integer is negative.
        ValueError: if value cannot be parsed as an integer at all.
    """
    value = int(value)
    if value < 0:
        # BUG FIX: was argparse.argumenttypeerror (lowercase), which does
        # not exist and raised AttributeError instead of a usable
        # argument-parsing error.
        raise argparse.ArgumentTypeError(
            '{} is a negative integer value'.format(value))
    return value
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: wait for one or more OSDs to (not) match a status.
    parser = argparse.ArgumentParser(
        description='Wait for OSD status match')
    parser.add_argument(
        '-o', '--osd',
        nargs='*',
        help='osd id',
        type=non_negative_interger,
        required=True)
    parser.add_argument(
        '-n', '--not',
        dest='reverse_logic',
        help='reverse logic: wait for status NOT to match',
        action='store_true',
        default=False)
    parser.add_argument(
        '-s', '--status',
        nargs='+',
        help='status',
        type=str,
        required=True)
    parser.add_argument(
        '-c', '--retry-count',
        help='retry count',
        type=non_negative_interger,
        default=RETRY_FOREVER)
    parser.add_argument(
        '-i', '--retry-interval',
        help='retry interval (seconds)',
        type=non_negative_interger,
        default=RETRY_INTERVAL_SEC)
    parser.add_argument(
        '-d', '--deadline',
        help='deadline (seconds)',
        type=non_negative_interger,
        default=NO_DEADLINE)
    args = parser.parse_args()
    start = time.time()
    try:
        osd_wait_status(
            ['osd.{}'.format(o) for o in args.osd],
            args.status,
            args.reverse_logic,
            args.retry_count,
            args.retry_interval,
            args.deadline)
        LOG.info('Elapsed time: {:.02f} seconds'.format(
            time.time() - start))
        sys.exit(0)
    except retrying.RetryError as e:
        # BUG FIX: ".format" previously bound only to the second adjacent
        # string literal (the closing paren was misplaced), so the first
        # "{}" was never filled and "{:.02f}" was applied to the
        # RetryError itself, raising at log time.
        LOG.warn(
            ('Retry error: {}. '
             'Elapsed time: {:.02f} seconds').format(
                e, time.time() - start))
        # NOTE(review): falls through and exits 0 on RetryError while
        # OsdException exits 1 — confirm this asymmetry is intended.
    except OsdException as e:
        LOG.warn(
            ('OSD wait error: {}. '
             'Elapsed time: {:.02f} seconds').format(
                e, time.time() - start))
        sys.exit(1)
|
@ -1,6 +0,0 @@
|
||||
# Source-RPM build metadata for the python-cephclient package.
CLIENT_NAME=python-cephclient
CLIENT_VER=v0.1.0.5

# Tarball comes from the pre-populated downloads area; everything else
# (patches, spec) is copied from the package directory.
COPY_LIST="$CGCS_BASE/downloads/$CLIENT_NAME-$CLIENT_VER.tar.gz $PKG_BASE/$CLIENT_NAME/*"

# Packaging patch level, fed into the RPM Release field.
TIS_PATCH_VER=2
|
@ -1,69 +0,0 @@
|
||||
# RPM spec for python-cephclient, a client library for the Ceph REST API.
%{!?_licensedir:%global license %%doc}
%global pypi_name python-cephclient

Name: python-cephclient
Version: 0.1.0.5
Release: 0%{?_tis_dist}.%{tis_patch_ver}
Summary: python-cephclient

License: Apache-2.0
URL: https://github.com/dmsimard/python-cephclient
Group: devel/python
Packager: Wind River <info@windriver.com>

Source0: %{pypi_name}-v%{version}.tar.gz

# Local patches applied in order on top of the upstream tarball
# during the prep stage below.
Patch0: fix-osd-crush-remove.patch
Patch1: set-default-endpoint.patch
Patch2: 0001-US63903-Ceph-Rebase-Update-REST-API-to-0.94.2.patch
Patch3: add-osd-get-pool-quota.patch
Patch4: 0001-US70398-Ceph-Rebase-Update-REST-API-to-0.94.5.patch
Patch5: fix-osd-tier-add.patch
Patch6: US92424-Ceph-Rebase-Update-REST-API-to-10.2.4.patch

# Pure Python, no compiled artifacts.
BuildArch: noarch

BuildRequires: python
BuildRequires: ceph
BuildRequires: python2-pip
BuildRequires: python2-wheel

Requires: python

Provides: python-cephclient

%description
Client library for the Ceph REST API

%prep
%autosetup -p 1 -n %{pypi_name}-%{version}

# Remove bundled egg-info
rm -rf %{pypi_name}.egg-info

# Let RPM handle the dependencies
rm -f requirements.txt

%build
%{__python2} setup.py build
%py2_build_wheel

%install
%{__python2} setup.py install --skip-build --root %{buildroot}
mkdir -p $RPM_BUILD_ROOT/wheels
install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/

%files
%doc README.rst
%license LICENSE
%{python2_sitelib}/cephclient
%{python2_sitelib}/*.egg-info

# Separate subpackage shipping the built wheel for pip-based installs.
%package wheels
Summary: %{name} wheels

%description wheels
Contains python wheels for %{name}

%files wheels
/wheels/*
|
@ -1,220 +0,0 @@
|
||||
From 016ebffad6c953cf51c538cc8c45edf56e681515 Mon Sep 17 00:00:00 2001
|
||||
From: Robert Church <robert.church@windriver.com>
|
||||
Date: Fri, 21 Aug 2015 13:05:18 -0500
|
||||
Subject: [PATCH] US63903: Ceph Rebase - Update REST API to 0.94.2
|
||||
|
||||
This updates the existing REST APIs to correspond to what is required by
Ceph 0.94.2 (Hammer LTS)
|
||||
---
|
||||
cephclient/wrapper.py | 134 +++++++++++++++++++++++++++++++++++---------------
|
||||
1 file changed, 95 insertions(+), 39 deletions(-)
|
||||
|
||||
diff --git a/cephclient/wrapper.py b/cephclient/wrapper.py
|
||||
index 8f583a5..926eb7f 100644
|
||||
--- a/cephclient/wrapper.py
|
||||
+++ b/cephclient/wrapper.py
|
||||
@@ -335,8 +335,12 @@ class CephWrapper(client.CephClient):
|
||||
def osd_crush_dump(self, **kwargs):
|
||||
return self.get('osd/crush/dump', **kwargs)
|
||||
|
||||
- def osd_crush_rule_dump(self, **kwargs):
|
||||
- return self.get('osd/crush/rule/dump', **kwargs)
|
||||
+ def osd_crush_rule_dump(self, name=None, **kwargs):
|
||||
+ if name is not None:
|
||||
+ return self.get('osd/crush/rule/dump?name={0}'
|
||||
+ .format(name), **kwargs)
|
||||
+ else:
|
||||
+ return self.get('osd/crush/rule/dump', **kwargs)
|
||||
|
||||
def osd_crush_rule_list(self, **kwargs):
|
||||
return self.get('osd/crush/rule/list', **kwargs)
|
||||
@@ -450,7 +454,7 @@ class CephWrapper(client.CephClient):
|
||||
.format(name, args), **kwargs)
|
||||
|
||||
def osd_crush_remove(self, name, ancestor=None, **kwargs):
|
||||
- if ancestor:
|
||||
+ if ancestor is not None:
|
||||
return self.put('osd/crush/remove?name={0}&ancestor={1}'
|
||||
.format(name, ancestor), **kwargs)
|
||||
else:
|
||||
@@ -462,29 +466,43 @@ class CephWrapper(client.CephClient):
|
||||
.format(name, weight), **kwargs)
|
||||
|
||||
def osd_crush_rm(self, name, ancestor, **kwargs):
|
||||
- return self.put('osd/crush/rm?name={0}&ancestor={1}'
|
||||
- .format(name, ancestor), **kwargs)
|
||||
+ if ancestor is not None:
|
||||
+ return self.put('osd/crush/rm?name={0}&ancestor={1}'
|
||||
+ .format(name, ancestor), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('osd/crush/rm?name={0}'
|
||||
+ .format(name), **kwargs)
|
||||
|
||||
- def osd_crush_rule_create_simple(self, name, root, type, **kwargs):
|
||||
- return self.put(
|
||||
- 'osd/crush/rule/create-simple?name={0}&root={1}&type={2}'
|
||||
- .format(name, root, type), **kwargs)
|
||||
+ def osd_crush_rule_create_simple(self, name, root,
|
||||
+ type, mode=None, **kwargs):
|
||||
+ if mode is not None:
|
||||
+ return self.put(
|
||||
+ 'osd/crush/rule/create-simple?name={0}&root={1}&type={2}'
|
||||
+ '&mode={3}'.format(name, root, type, mode), **kwargs)
|
||||
+ else:
|
||||
+ return self.put(
|
||||
+ 'osd/crush/rule/create-simple?name={0}&root={1}&type={2}'
|
||||
+ .format(name, root, type), **kwargs)
|
||||
|
||||
def osd_crush_rule_rm(self, name, **kwargs):
|
||||
return self.put('osd/crush/rule/rm?name={0}'
|
||||
.format(name), **kwargs)
|
||||
|
||||
- def osd_crush_set(self, id, name, weight, args, **kwargs):
|
||||
+ def osd_crush_set(self, id, weight, args, **kwargs):
|
||||
return self.put('osd/crush/set?id={0}&weight={1}&args={2}'
|
||||
- .format(id, name, weight, args), **kwargs)
|
||||
+ .format(id, weight, args), **kwargs)
|
||||
|
||||
def osd_crush_tunables(self, profile, **kwargs):
|
||||
return self.put('osd/crush/tunables?profile={0}'
|
||||
.format(profile), **kwargs)
|
||||
|
||||
- def osd_crush_unlink(self, name, ancestor, **kwargs):
|
||||
- return self.put('osd/crush/unlink?name={0}&ancestor={1}'
|
||||
- .format(name, ancestor), **kwargs)
|
||||
+ def osd_crush_unlink(self, name, ancestor=None, **kwargs):
|
||||
+ if ancestor is not None:
|
||||
+ return self.put('osd/crush/unlink?name={0}&ancestor={1}'
|
||||
+ .format(name, ancestor), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('osd/crush/unlink?name={0}'
|
||||
+ .format(name), **kwargs)
|
||||
|
||||
def osd_deep_scrub(self, who, **kwargs):
|
||||
return self.put('osd/deep-scrub?who={0}'
|
||||
@@ -498,22 +516,44 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/in?ids={0}'
|
||||
.format(ids), **kwargs)
|
||||
|
||||
- def osd_lost(self, id, sure, **kwargs):
|
||||
- return self.put('osd/lost?id={0}&sure={1}'
|
||||
- .format(id, sure), **kwargs)
|
||||
+ def osd_lost(self, id, sure=None, **kwargs):
|
||||
+ if sure is not None:
|
||||
+ return self.put('osd/lost?id={0}&sure={1}'
|
||||
+ .format(id, sure), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('osd/lost?id={0}'
|
||||
+ .format(id), **kwargs)
|
||||
|
||||
def osd_out(self, ids, **kwargs):
|
||||
return self.put('osd/out?ids={0}'
|
||||
.format(ids), **kwargs)
|
||||
|
||||
- def osd_pool_create(self, pool, pg_num, pgp_num, properties, **kwargs):
|
||||
- return self.put(
|
||||
- 'osd/pool/create?pool={0}&pg_num={1}&pgp_num={2}&properties={3}'
|
||||
- .format(pool, pg_num, pgp_num, properties), **kwargs)
|
||||
-
|
||||
- def osd_pool_delete(self, pool, sure, **kwargs):
|
||||
- return self.put('osd/pool/delete?pool={0}&sure={1}'
|
||||
- .format(pool, sure), **kwargs)
|
||||
+ def osd_pool_create(self, pool, pg_num, pgp_num, pool_type=None,
|
||||
+ erasure_code_profile=None, ruleset=None,
|
||||
+ expected_num_objects=None, **kwargs):
|
||||
+ request = []
|
||||
+ request.append('osd/pool/create?pool={0}&pg_num={1}&pgp_num={2}'
|
||||
+ .format(pool, pg_num, pgp_num))
|
||||
+ if pool_type is not None:
|
||||
+ request.append('&pool_type={0}'.format(pool_type))
|
||||
+ if erasure_code_profile is not None:
|
||||
+ request.append('&erasure_code_profile={0}'
|
||||
+ .format(erasure_code_profile))
|
||||
+ if ruleset is not None:
|
||||
+ request.append('&ruleset={0}'.format(ruleset))
|
||||
+ if expected_num_objects is not None:
|
||||
+ request.append('&expected_num_objects={0}'
|
||||
+ .format(expected_num_objects))
|
||||
+ return self.put(''.join(request), **kwargs)
|
||||
+
|
||||
+ def osd_pool_delete(self, pool, pool2=None, sure=None, **kwargs):
|
||||
+ request = []
|
||||
+ request.append('osd/pool/delete?pool={0}'.format(pool))
|
||||
+ if pool2 is not None:
|
||||
+ request.append('&pool2={0}'.format(pool2))
|
||||
+ if sure is not None:
|
||||
+ request.append('&sure={0}'.format(sure))
|
||||
+ return self.put(''.join(request), **kwargs)
|
||||
|
||||
def osd_pool_param(self, pool, var, **kwargs):
|
||||
return self.put('osd/pool/get?pool={0}&var={1}'
|
||||
@@ -531,13 +571,17 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/pool/rmsnap?pool={0}&snap={1}'
|
||||
.format(pool, snap), **kwargs)
|
||||
|
||||
- def osd_set_pool_param(self, pool, var, **kwargs):
|
||||
- return self.put('osd/pool/set?pool={0}&var={1}'
|
||||
- .format(pool, var), **kwargs)
|
||||
+ def osd_set_pool_param(self, pool, var, val, force=None, **kwargs):
|
||||
+ if force is not None:
|
||||
+ return self.put('osd/pool/set?pool={0}&var={1}&val={2}&force={3}'
|
||||
+ .format(pool, var, val, force), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('osd/pool/set?pool={0}&var={1}&val={2}'
|
||||
+ .format(pool, var, val), **kwargs)
|
||||
|
||||
- def osd_set_pool_quota(self, pool, field, **kwargs):
|
||||
- return self.put('osd/pool/set-quota?pool={0}&field={1}'
|
||||
- .format(pool, field), **kwargs)
|
||||
+ def osd_set_pool_quota(self, pool, field, val, **kwargs):
|
||||
+ return self.put('osd/pool/set-quota?pool={0}&field={1}&val={2}'
|
||||
+ .format(pool, field, val), **kwargs)
|
||||
|
||||
def osd_repair(self, pool, who, **kwargs):
|
||||
return self.put('osd/repair?who={0}'
|
||||
@@ -571,9 +615,14 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/thrash?num_epochs={0}'
|
||||
.format(num_epochs), **kwargs)
|
||||
|
||||
- def osd_tier_add(self, pool, tierpool, **kwargs):
|
||||
- return self.put('osd/tier/add?pool={0}&tierpool={1}'
|
||||
- .format(pool, tierpool), **kwargs)
|
||||
+ def osd_tier_add(self, pool, tierpool, force_notempty=None, **kwargs):
|
||||
+ if force_notempty is not None:
|
||||
+ return self.put('osd/tier/add?pool={0}&tierpool={1}'
|
||||
+ '&force_notempty={2}'
|
||||
+ .format(pool, tierpool, force_notempty), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('osd/tier/add?pool={0}&tierpool={1}'
|
||||
+ .format(pool, tierpool), **kwargs)
|
||||
|
||||
def osd_tier_cachemode(self, pool, mode, **kwargs):
|
||||
return self.put('osd/tier/cache-mode?pool={0}&mode={1}'
|
||||
@@ -621,12 +670,19 @@ class CephWrapper(client.CephClient):
|
||||
def pg_dump_pools_json(self, **kwargs):
|
||||
return self.get('pg/dump_pools_json', **kwargs)
|
||||
|
||||
- def pg_dump_stuck(self, stuckops=None, **kwargs):
|
||||
+ def pg_dump_stuck(self, stuckops=None, threshold=None, **kwargs):
|
||||
+ request = []
|
||||
+ request.append('pg/dump_stuck')
|
||||
if stuckops is not None:
|
||||
- return self.get('pg/dump_stuck?stuckops={0}'
|
||||
- .format(stuckops), **kwargs)
|
||||
- else:
|
||||
- return self.get('pg/dump_stuck', **kwargs)
|
||||
+ request.append('?stuckops={0}'.format(stuckops))
|
||||
+ if threshold is not None:
|
||||
+ if stuckops is not None:
|
||||
+ request.append('&')
|
||||
+ else:
|
||||
+ request.append('?')
|
||||
+ request.append('threshold={0}'.format(threshold))
|
||||
+
|
||||
+ return self.get(''.join(request), **kwargs)
|
||||
|
||||
def pg_getmap(self, **kwargs):
|
||||
kwargs['supported_body_types'] = ['binary']
|
||||
--
|
||||
2.5.0
|
||||
|
@ -1,475 +0,0 @@
|
||||
From 2253242390ed6dfd1206ae2743ccab4c28437d13 Mon Sep 17 00:00:00 2001
|
||||
From: Robert Church <robert.church@windriver.com>
|
||||
Date: Tue, 24 Nov 2015 20:37:39 -0600
|
||||
Subject: [PATCH] US70398: Ceph Rebase - Update REST API to 0.94.5
|
||||
|
||||
Add support for all APIs currently present in the Ceph REST API. This
|
||||
provides a version 0.94.5 compliant client and include client calls that
|
||||
were present previously but never added to the python-cephclient.
|
||||
---
|
||||
cephclient/wrapper.py | 281 +++++++++++++++++++++++++++++++++++++++++++++++---
|
||||
1 file changed, 269 insertions(+), 12 deletions(-)
|
||||
|
||||
diff --git a/cephclient/wrapper.py b/cephclient/wrapper.py
|
||||
index 867b68b..871b53a 100644
|
||||
--- a/cephclient/wrapper.py
|
||||
+++ b/cephclient/wrapper.py
|
||||
@@ -58,6 +58,9 @@ class CephWrapper(client.CephClient):
|
||||
def status(self, **kwargs):
|
||||
return self.get('status', **kwargs)
|
||||
|
||||
+ def version(self, **kwargs):
|
||||
+ return self.get('version', **kwargs)
|
||||
+
|
||||
###
|
||||
# root PUT calls
|
||||
###
|
||||
@@ -83,6 +86,16 @@ class CephWrapper(client.CephClient):
|
||||
def scrub(self, **kwargs):
|
||||
return self.put('scrub', **kwargs)
|
||||
|
||||
+ def sync(self, validate1=None, validate2=None, **kwargs):
|
||||
+ request = []
|
||||
+ request.append('sync/force')
|
||||
+ if validate1 is not None:
|
||||
+ request.append('?validate1={0}'.format(validate1))
|
||||
+ if validate2 is not None:
|
||||
+ request.append('validate2={0}'.format(validate2))
|
||||
+
|
||||
+ return self.put(''.join(request), **kwargs)
|
||||
+
|
||||
def tell(self, target, args, **kwargs):
|
||||
return self.put('tell?target={0}&args={1}'
|
||||
.format(target, args), **kwargs)
|
||||
@@ -191,6 +204,44 @@ class CephWrapper(client.CephClient):
|
||||
return self.get('config-key/list', **kwargs)
|
||||
|
||||
###
|
||||
+ # config-key PUT calls
|
||||
+ ###
|
||||
+ def config_key_del(self, key, **kwargs):
|
||||
+ return self.put('config-key/del?key={0}'
|
||||
+ .format(key), **kwargs)
|
||||
+
|
||||
+ def config_key_put(self, key, val, **kwargs):
|
||||
+ return self.put('config-key/put?key={0}&val={1}'
|
||||
+ .format(key, val), **kwargs)
|
||||
+
|
||||
+ ###
|
||||
+ # fs GET calls
|
||||
+ ###
|
||||
+ def fs_ls(self, **kwargs):
|
||||
+ return self.get('fs/ls', **kwargs)
|
||||
+
|
||||
+ ###
|
||||
+ # fs PUT calls
|
||||
+ ###
|
||||
+ def fs_new(self, fs_name, metadata, data, **kwargs):
|
||||
+ return self.put('fs/new?fs_name={0}&metadata={1}&data={2}'
|
||||
+ .format(fs_name, metadata, data), **kwargs)
|
||||
+
|
||||
+ def fs_reset(self, fs_name, sure=None, **kwargs):
|
||||
+ request = []
|
||||
+ request.append('fs/reset?fs_name={0}'.format(fs_name))
|
||||
+ if sure is not None:
|
||||
+ request.append('&sure={0}'.format(sure))
|
||||
+ return self.put(''.join(request), **kwargs)
|
||||
+
|
||||
+ def fs_rm(self, fs_name, sure=None, **kwargs):
|
||||
+ request = []
|
||||
+ request.append('fs/rm?fs_name={0}'.format(fs_name))
|
||||
+ if sure is not None:
|
||||
+ request.append('&sure={0}'.format(sure))
|
||||
+ return self.put(''.join(request), **kwargs)
|
||||
+
|
||||
+ ###
|
||||
# mds GET calls
|
||||
###
|
||||
def mds_compat_show(self, **kwargs):
|
||||
@@ -244,9 +295,13 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('mds/fail?who={0}'
|
||||
.format(who), **kwargs)
|
||||
|
||||
- def mds_newfs(self, metadata, data, sure, **kwargs):
|
||||
- return self.put('mds/newfs?metadata={0}&data={1}&sure={2}'
|
||||
- .format(metadata, data, sure), **kwargs)
|
||||
+ def mds_newfs(self, metadata, data, sure=None, **kwargs):
|
||||
+ if sure is not None:
|
||||
+ return self.put('mds/newfs?metadata={0}&data={1}&sure={2}'
|
||||
+ .format(metadata, data, sure), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('mds/newfs?metadata={0}&data={1}'
|
||||
+ .format(metadata, data), **kwargs)
|
||||
|
||||
def mds_remove_data_pool(self, pool, **kwargs):
|
||||
return self.put('mds/remove_data_pool?pool={0}'
|
||||
@@ -260,11 +315,29 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('mds/rmfailed?who={0}'
|
||||
.format(who), **kwargs)
|
||||
|
||||
- def mds_set_allow_new_snaps(self, sure, **kwargs):
|
||||
- """
|
||||
- mds/set?key=allow_new_snaps&sure=
|
||||
- """
|
||||
- raise exceptions.FunctionNotImplemented()
|
||||
+ def mds_set_max_file_size(self, val, confirm=None, **kwargs):
|
||||
+ if confirm is not None:
|
||||
+ return self.put('mds/set?var=max_file_size?val={0}&confirm={1}'
|
||||
+ .format(val, confirm), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('mds/set?var=max_file_size?val={0}'
|
||||
+ .format(val), **kwargs)
|
||||
+
|
||||
+ def mds_set_allow_new_snaps(self, val, confirm=None, **kwargs):
|
||||
+ if confirm is not None:
|
||||
+ return self.put('mds/set?var=allow_new_snaps?val={0}&confirm={1}'
|
||||
+ .format(val, confirm), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('mds/set?var=allow_new_snaps?val={0}'
|
||||
+ .format(val), **kwargs)
|
||||
+
|
||||
+ def mds_set_inline_data(self, val, confirm=None, **kwargs):
|
||||
+ if confirm is not None:
|
||||
+ return self.put('mds/set?var=inline_data?val={0}&confirm={1}'
|
||||
+ .format(val, confirm), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('mds/set?var=inline_data?val={0}'
|
||||
+ .format(val), **kwargs)
|
||||
|
||||
def mds_set_max_mds(self, maxmds, **kwargs):
|
||||
return self.put('mds/set_max_mds?maxmds={0}'
|
||||
@@ -332,6 +405,9 @@ class CephWrapper(client.CephClient):
|
||||
def osd_blacklist_ls(self, **kwargs):
|
||||
return self.get('osd/blacklist/ls', **kwargs)
|
||||
|
||||
+ def osd_blocked_by(self, **kwargs):
|
||||
+ return self.get('osd/blocked-by', **kwargs)
|
||||
+
|
||||
def osd_crush_dump(self, **kwargs):
|
||||
return self.get('osd/crush/dump', **kwargs)
|
||||
|
||||
@@ -348,6 +424,19 @@ class CephWrapper(client.CephClient):
|
||||
def osd_crush_rule_ls(self, **kwargs):
|
||||
return self.get('osd/crush/rule/ls', **kwargs)
|
||||
|
||||
+ def osd_crush_show_tunables(self, **kwargs):
|
||||
+ return self.get('osd/crush/show-tunables', **kwargs)
|
||||
+
|
||||
+ def osd_crush_tree(self, **kwargs):
|
||||
+ return self.get('osd/crush/tree', **kwargs)
|
||||
+
|
||||
+ def osd_df(self, output_method=None, **kwargs):
|
||||
+ if output_method is not None:
|
||||
+ return self.get('osd/df?output_method={0}'
|
||||
+ .format(output_method), **kwargs)
|
||||
+ else:
|
||||
+ return self.get('osd/df', **kwargs)
|
||||
+
|
||||
def osd_dump(self, epoch=None, **kwargs):
|
||||
if epoch is not None:
|
||||
return self.get('osd/dump?epoch={0}'
|
||||
@@ -355,6 +444,13 @@ class CephWrapper(client.CephClient):
|
||||
else:
|
||||
return self.get('osd/dump', **kwargs)
|
||||
|
||||
+ def osd_erasure_code_profile_get(self, name, **kwargs):
|
||||
+ return self.get('osd/erasure-code-profile/get?name={0}'
|
||||
+ .format(name), **kwargs)
|
||||
+
|
||||
+ def osd_erasure_code_profile_ls(self, **kwargs):
|
||||
+ return self.get('osd/erasure-code-profile/ls', **kwargs)
|
||||
+
|
||||
def osd_find(self, id, **kwargs):
|
||||
return self.get('osd/find?id={0}'
|
||||
.format(id), **kwargs)
|
||||
@@ -398,9 +494,17 @@ class CephWrapper(client.CephClient):
|
||||
return self.get('osd/map?pool={0}&object={1}'
|
||||
.format(pool, object), **kwargs)
|
||||
|
||||
+ def osd_metadata(self, id, **kwargs):
|
||||
+ return self.get('osd/metadata?id={0}'
|
||||
+ .format(id), **kwargs)
|
||||
+
|
||||
def osd_perf(self, **kwargs):
|
||||
return self.get('osd/perf', **kwargs)
|
||||
|
||||
+ def osd_get_pool_param(self, pool, var, **kwargs):
|
||||
+ return self.get('osd/pool/get?pool={0}&var={1}'
|
||||
+ .format(pool, var), **kwargs)
|
||||
+
|
||||
def osd_pool_get(self, pool, var, **kwargs):
|
||||
return self.get('osd/pool/get?pool={0}&var={1}'
|
||||
.format(pool, var), **kwargs)
|
||||
@@ -416,6 +520,13 @@ class CephWrapper(client.CephClient):
|
||||
return self.get('osd/pool/get-quota?pool={0}'
|
||||
.format(pool), **kwargs)
|
||||
|
||||
+ def osd_pool_ls(self, detail=None, **kwargs):
|
||||
+ if detail is not None:
|
||||
+ return self.get('osd/pool/ls?detail={0}'
|
||||
+ .format(detail), **kwargs)
|
||||
+ else:
|
||||
+ return self.get('osd/pool/ls', **kwargs)
|
||||
+
|
||||
def osd_stat(self, **kwargs):
|
||||
return self.get('osd/stat', **kwargs)
|
||||
|
||||
@@ -449,6 +560,10 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/crush/create-or-move?id={0}&weight={1}&args={2}'
|
||||
.format(id, weight, args), **kwargs)
|
||||
|
||||
+ def osd_crush_get_tunable(self, tunable, **kwargs):
|
||||
+ return self.put('osd/crush/get-tunable?tunable={0}'
|
||||
+ .format(tunable), **kwargs)
|
||||
+
|
||||
def osd_crush_link(self, name, args, **kwargs):
|
||||
return self.put('osd/crush/link?name={0}&args={2}'
|
||||
.format(name, args), **kwargs)
|
||||
@@ -465,10 +580,21 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/crush/remove?name={0}'
|
||||
.format(name), **kwargs)
|
||||
|
||||
+ def osd_crush_rename_bucket(self, srcname, dstname, **kwargs):
|
||||
+ return self.put('osd/crush/rename-bucket?srcname={0}&dstname={1}'
|
||||
+ .format(srcname, dstname), **kwargs)
|
||||
+
|
||||
def osd_crush_reweight(self, name, weight, **kwargs):
|
||||
return self.put('osd/crush/reweight?name={0}&weight={1}'
|
||||
.format(name, weight), **kwargs)
|
||||
|
||||
+ def osd_crush_reweight_all(self, **kwargs):
|
||||
+ return self.put('osd/crush/reweight-all', **kwargs)
|
||||
+
|
||||
+ def osd_crush_reweight_subtree(self, name, weight, **kwargs):
|
||||
+ return self.put('osd/crush/reweight-subtree?name={0}&weight={1}'
|
||||
+ .format(name, weight), **kwargs)
|
||||
+
|
||||
def osd_crush_rm(self, name, ancestor, **kwargs):
|
||||
if ancestor is not None:
|
||||
return self.put('osd/crush/rm?name={0}&ancestor={1}'
|
||||
@@ -477,6 +603,11 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/crush/rm?name={0}'
|
||||
.format(name), **kwargs)
|
||||
|
||||
+ def osd_crush_rule_create_erasure(self, name, profile, **kwargs):
|
||||
+ return self.put(
|
||||
+ 'osd/crush/rule/create-erasure?name={0}&profile={1}'
|
||||
+ .format(name, profile), **kwargs)
|
||||
+
|
||||
def osd_crush_rule_create_simple(self, name, root,
|
||||
type, mode=None, **kwargs):
|
||||
if mode is not None:
|
||||
@@ -496,6 +627,10 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/crush/set?id={0}&weight={1}&args={2}'
|
||||
.format(id, weight, args), **kwargs)
|
||||
|
||||
+ def osd_crush_set_tunable(self, tunable, value, **kwargs):
|
||||
+ return self.put('osd/crush/set-tunable?tunable={0}&value={1}'
|
||||
+ .format(tunable), **kwargs)
|
||||
+
|
||||
def osd_crush_tunables(self, profile, **kwargs):
|
||||
return self.put('osd/crush/tunables?profile={0}'
|
||||
.format(profile), **kwargs)
|
||||
@@ -516,6 +651,18 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/down?ids={0}'
|
||||
.format(ids), **kwargs)
|
||||
|
||||
+ def osd_erasure_code_profile_rm(self, name, **kwargs):
|
||||
+ return self.put('osd/erasure-code-profile/rm?name={0}'
|
||||
+ .format(name), **kwargs)
|
||||
+
|
||||
+ def osd_erasure_code_profile_set(self, name, profile=None, **kwargs):
|
||||
+ if profile is not None:
|
||||
+ return self.put('osd/erasure-code-profile/set?name={0}&profile={1}'
|
||||
+ .format(name, profile), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('osd/erasure-code-profile/set?name={0}'
|
||||
+ .format(name), **kwargs)
|
||||
+
|
||||
def osd_in(self, ids, **kwargs):
|
||||
return self.put('osd/in?ids={0}'
|
||||
.format(ids), **kwargs)
|
||||
@@ -532,6 +679,17 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/out?ids={0}'
|
||||
.format(ids), **kwargs)
|
||||
|
||||
+ def osd_pause(self, **kwargs):
|
||||
+ return self.put('osd/pause', **kwargs)
|
||||
+
|
||||
+ def osd_pg_temp(self, pgid, id=None, **kwargs):
|
||||
+ if id is not None:
|
||||
+ return self.put('osd/pg-temp?pgid={0}&id={1}'
|
||||
+ .format(pgid, id), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('osd/pg-temp?pgid={0}'
|
||||
+ .format(pgid), **kwargs)
|
||||
+
|
||||
def osd_pool_create(self, pool, pg_num, pgp_num, pool_type=None,
|
||||
erasure_code_profile=None, ruleset=None,
|
||||
expected_num_objects=None, **kwargs):
|
||||
@@ -559,10 +717,6 @@ class CephWrapper(client.CephClient):
|
||||
request.append('&sure={0}'.format(sure))
|
||||
return self.put(''.join(request), **kwargs)
|
||||
|
||||
- def osd_pool_param(self, pool, var, **kwargs):
|
||||
- return self.put('osd/pool/get?pool={0}&var={1}'
|
||||
- .format(pool, var), **kwargs)
|
||||
-
|
||||
def osd_pool_mksnap(self, pool, snap, **kwargs):
|
||||
return self.put('osd/pool/mksnap?pool={0}&snap={1}'
|
||||
.format(pool, snap), **kwargs)
|
||||
@@ -583,6 +737,22 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/pool/set?pool={0}&var={1}&val={2}'
|
||||
.format(pool, var, val), **kwargs)
|
||||
|
||||
+ def osd_pool_set(self, pool, var, val, force=None, **kwargs):
|
||||
+ if force is not None:
|
||||
+ return self.put('osd/pool/set?pool={0}&var={1}&val={2}&force={3}'
|
||||
+ .format(pool, var, val, force), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('osd/pool/set?pool={0}&var={1}&val={2}'
|
||||
+ .format(pool, var, val), **kwargs)
|
||||
+
|
||||
+ def osd_primary_affinity(self, id, weight, **kwargs):
|
||||
+ return self.put('osd/primary-affinity?id={0}&weight={1}'
|
||||
+ .format(id, weight), **kwargs)
|
||||
+
|
||||
+ def osd_primary_temp(self, pgid, id, **kwargs):
|
||||
+ return self.put('osd/primary-temp?pgid={0}&id={1}'
|
||||
+ .format(pgid, id), **kwargs)
|
||||
+
|
||||
def osd_set_pool_quota(self, pool, field, val, **kwargs):
|
||||
return self.put('osd/pool/set-quota?pool={0}&field={1}&val={2}'
|
||||
.format(pool, field, val), **kwargs)
|
||||
@@ -595,6 +765,10 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/reweight?id={0}&weight={1}'
|
||||
.format(id, weight), **kwargs)
|
||||
|
||||
+ def osd_reweight_by_pg(self, oload, pools, **kwargs):
|
||||
+ return self.put('osd/reweight-by-pg?oload={0}&pools={1}'
|
||||
+ .format(oload, pools), **kwargs)
|
||||
+
|
||||
def osd_reweight_by_utilization(self, oload, **kwargs):
|
||||
return self.put('osd/reweight-by-utilization?oload={0}'
|
||||
.format(oload), **kwargs)
|
||||
@@ -611,6 +785,12 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/set?key={0}'
|
||||
.format(key), **kwargs)
|
||||
|
||||
+ def osd_crushmap(self, **kwargs):
|
||||
+ """
|
||||
+ osd/crushmap
|
||||
+ """
|
||||
+ raise exceptions.FunctionNotImplemented()
|
||||
+
|
||||
def osd_setmaxosd(self, newmax, **kwargs):
|
||||
return self.put('osd/setmaxosd?newmax={0}'
|
||||
.format(newmax), **kwargs)
|
||||
@@ -628,6 +808,11 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/tier/add?pool={0}&tierpool={1}'
|
||||
.format(pool, tierpool), **kwargs)
|
||||
|
||||
+ def osd_tier_add_cache(self, pool, tierpool, size, **kwargs):
|
||||
+ return self.put('osd/tier/add-cache?pool={0}&tierpool={1}'
|
||||
+ '&size={2}'
|
||||
+ .format(pool, tierpool, size), **kwargs)
|
||||
+
|
||||
def osd_tier_cachemode(self, pool, mode, **kwargs):
|
||||
return self.put('osd/tier/cache-mode?pool={0}&mode={1}'
|
||||
.format(pool, mode), **kwargs)
|
||||
@@ -644,6 +829,9 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/tier/set-overlay?pool={0}&overlaypool={1}'
|
||||
.format(pool, overlaypool), **kwargs)
|
||||
|
||||
+ def osd_unpause(self, key, **kwargs):
|
||||
+ return self.put('osd/unpause', **kwargs)
|
||||
+
|
||||
def osd_unset(self, key, **kwargs):
|
||||
return self.put('osd/unset?key={0}'
|
||||
.format(key), **kwargs)
|
||||
@@ -693,6 +881,44 @@ class CephWrapper(client.CephClient):
|
||||
|
||||
return self.get('pg/getmap', **kwargs)
|
||||
|
||||
+ def pg_ls(self, pool=None, states=None, **kwargs):
|
||||
+ request = []
|
||||
+ request.append('pg/ls')
|
||||
+ if pool is not None:
|
||||
+ request.append('?pool={0}'.format(pool))
|
||||
+ if states is not None:
|
||||
+ request.append('states={0}'.format(states))
|
||||
+
|
||||
+ return self.get(''.join(request), **kwargs)
|
||||
+
|
||||
+ def pg_ls_by_osd(self, osd, pool=None, states=None, **kwargs):
|
||||
+ request = []
|
||||
+ request.append('pg/ls-by-osd?osd={0}'.format(osd))
|
||||
+ if pool is not None:
|
||||
+ request.append('?pool={0}'.format(pool))
|
||||
+ if states is not None:
|
||||
+ request.append('states={0}'.format(states))
|
||||
+
|
||||
+ return self.get(''.join(request), **kwargs)
|
||||
+
|
||||
+ def pg_ls_by_pool(self, poolstr, states=None, **kwargs):
|
||||
+ if states is not None:
|
||||
+ return self.get('pg/ls-by-pool?poolstr={0}&states={1}'
|
||||
+ .format(poolstr, states), **kwargs)
|
||||
+ else:
|
||||
+ return self.get('pg/ls-by-pool?poolstr={0}'
|
||||
+ .format(poolstr), **kwargs)
|
||||
+
|
||||
+ def pg_ls_by_primary(self, osd, pool=None, states=None, **kwargs):
|
||||
+ request = []
|
||||
+ request.append('pg/ls-by-primary?osd={0}'.format(osd))
|
||||
+ if pool is not None:
|
||||
+ request.append('?pool={0}'.format(pool))
|
||||
+ if states is not None:
|
||||
+ request.append('states={0}'.format(states))
|
||||
+
|
||||
+ return self.get(''.join(request), **kwargs)
|
||||
+
|
||||
def pg_map(self, pgid, **kwargs):
|
||||
return self.get('pg/map?pgid={0}'
|
||||
.format(pgid), **kwargs)
|
||||
@@ -701,6 +927,37 @@ class CephWrapper(client.CephClient):
|
||||
return self.get('pg/stat', **kwargs)
|
||||
|
||||
###
|
||||
+ # pg PUT calls
|
||||
+ ###
|
||||
+
|
||||
+ def pg_deep_scrub(self, pgid, **kwargs):
|
||||
+ return self.put('pg/deep-scrub?pgid={0}'
|
||||
+ .format(pgid), **kwargs)
|
||||
+
|
||||
+ def pg_force_create_pg(self, pgid, **kwargs):
|
||||
+ return self.put('pg/force_create_pg?pgid={0}'
|
||||
+ .format(pgid), **kwargs)
|
||||
+
|
||||
+ def pg_repair(self, pgid, **kwargs):
|
||||
+ return self.put('pg/repair?pgid={0}'
|
||||
+ .format(pgid), **kwargs)
|
||||
+
|
||||
+ def pg_scrub(self, pgid, **kwargs):
|
||||
+ return self.put('pg/scrub?pgid={0}'
|
||||
+ .format(pgid), **kwargs)
|
||||
+
|
||||
+ def pg_send_pg_creates(self, **kwargs):
|
||||
+ return self.put('pg/send_pg_creates', **kwargs)
|
||||
+
|
||||
+ def pg_set_full_ratio(self, ratio, **kwargs):
|
||||
+ return self.put('pg/set_full_ratio?ratio={0}'
|
||||
+ .format(ratio), **kwargs)
|
||||
+
|
||||
+ def pg_set_nearfull_ratio(self, ratio, **kwargs):
|
||||
+ return self.put('pg/set_nearfull_ratio?ratio={0}'
|
||||
+ .format(ratio), **kwargs)
|
||||
+
|
||||
+ ###
|
||||
# tell GET calls
|
||||
###
|
||||
def tell_debug_dump_missing(self, id, filename, **kwargs):
|
||||
--
|
||||
2.5.0
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,15 +0,0 @@
|
||||
Index: git/cephclient/wrapper.py
|
||||
===================================================================
|
||||
--- git.orig/cephclient/wrapper.py 2015-10-26 16:00:32.768154102 +0200
|
||||
+++ git/cephclient/wrapper.py 2015-11-04 14:46:09.491855340 +0200
|
||||
@@ -412,6 +412,10 @@
|
||||
else:
|
||||
return self.get('osd/pool/stats', **kwargs)
|
||||
|
||||
+ def osd_get_pool_quota(self, pool, **kwargs):
|
||||
+ return self.get('osd/pool/get-quota?pool={0}'
|
||||
+ .format(pool), **kwargs)
|
||||
+
|
||||
def osd_stat(self, **kwargs):
|
||||
return self.get('osd/stat', **kwargs)
|
||||
|
@ -1,25 +0,0 @@
|
||||
---
|
||||
cephclient/wrapper.py | 7 +++++--
|
||||
1 file changed, 5 insertions(+), 2 deletions(-)
|
||||
|
||||
Index: git/cephclient/wrapper.py
|
||||
===================================================================
|
||||
--- git.orig/cephclient/wrapper.py
|
||||
+++ git/cephclient/wrapper.py
|
||||
@@ -449,9 +449,13 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/crush/move?name={0}&args={1}'
|
||||
.format(name, args), **kwargs)
|
||||
|
||||
- def osd_crush_remove(self, name, ancestor, **kwargs):
|
||||
- return self.put('osd/crush/remove?name={0}&ancestor={1}'
|
||||
- .format(name, ancestor), **kwargs)
|
||||
+ def osd_crush_remove(self, name, ancestor=None, **kwargs):
|
||||
+ if ancestor:
|
||||
+ return self.put('osd/crush/remove?name={0}&ancestor={1}'
|
||||
+ .format(name, ancestor), **kwargs)
|
||||
+ else:
|
||||
+ return self.put('osd/crush/remove?name={0}'
|
||||
+ .format(name), **kwargs)
|
||||
|
||||
def osd_crush_reweight(self, name, weight, **kwargs):
|
||||
return self.put('osd/crush/reweight?name={0}&weight={1}'
|
@ -1,19 +0,0 @@
|
||||
diff -rupN a/cephclient/wrapper.py b/cephclient/wrapper.py
|
||||
--- a/cephclient/wrapper.py 2016-07-04 21:59:06.000000000 +0300
|
||||
+++ b/cephclient/wrapper.py 2016-07-07 18:01:50.000000000 +0300
|
||||
@@ -799,11 +799,11 @@ class CephWrapper(client.CephClient):
|
||||
return self.put('osd/thrash?num_epochs={0}'
|
||||
.format(num_epochs), **kwargs)
|
||||
|
||||
- def osd_tier_add(self, pool, tierpool, force_notempty=None, **kwargs):
|
||||
- if force_notempty is not None:
|
||||
+ def osd_tier_add(self, pool, tierpool, force_nonempty=None, **kwargs):
|
||||
+ if force_nonempty is not None:
|
||||
return self.put('osd/tier/add?pool={0}&tierpool={1}'
|
||||
- '&force_notempty={2}'
|
||||
- .format(pool, tierpool, force_notempty), **kwargs)
|
||||
+ '&force_nonempty={2}'
|
||||
+ .format(pool, tierpool, force_nonempty), **kwargs)
|
||||
else:
|
||||
return self.put('osd/tier/add?pool={0}&tierpool={1}'
|
||||
.format(pool, tierpool), **kwargs)
|
@ -1,20 +0,0 @@
|
||||
---
|
||||
cephclient/client.py | 7 ++++++-
|
||||
1 file changed, 6 insertions(+), 1 deletion(-)
|
||||
|
||||
--- a/cephclient/client.py
|
||||
+++ b/cephclient/client.py
|
||||
@@ -53,7 +53,12 @@ class CephClient(object):
|
||||
|
||||
self.log.debug("Params: {0}".format(str(self.params)))
|
||||
|
||||
- self.endpoint = self.params['endpoint']
|
||||
+ if 'endpoint' in self.params:
|
||||
+ self.endpoint = self.params['endpoint']
|
||||
+ else:
|
||||
+ # default endpoint
|
||||
+ self.endpoint = 'http://localhost:5001/api/v0.1/'
|
||||
+
|
||||
if 'timeout' not in self.params:
|
||||
self.timeout = None
|
||||
|
Loading…
x
Reference in New Issue
Block a user