Merge remote-tracking branch 'origin/master' into f/centos75-merge
Change-Id: I7eb34f4c3c43a84c9788cbf158cfe6c0d34bda1e
commit 57ae67d022
@@ -2,5 +2,9 @@
 - project:
     check:
       jobs:
-        - openstack-tox-pep8:
-            voting: false
+        - openstack-tox-pep8
+        - openstack-tox-linters
+    gate:
+      jobs:
+        - openstack-tox-pep8
+        - openstack-tox-linters

centos_iso_image.inc (new file, 176 lines)
@@ -0,0 +1,176 @@
# List of packages to be included/installed in ISO
# If these have dependencies, they will be pulled in automatically
#

# ceph
ceph
ceph-common
ceph-fuse
ceph-radosgw
libcephfs1
python-cephfs
python-ceph-compat

# ceph-manager
ceph-manager

# openstack-murano
openstack-murano-common
openstack-murano-engine
openstack-murano-api
openstack-murano-cf-api
openstack-murano-doc

# python-muranoclient
python2-muranoclient
python-muranoclient-doc

# openstack-murano-ui
openstack-murano-ui
openstack-murano-ui-doc

# openstack-ironic
openstack-ironic-common
openstack-ironic-api
openstack-ironic-conductor

# python-ironicclient
python2-ironicclient

# python-magnumclient
python2-magnumclient
python-magnumclient-doc
python-magnumclient-tests

# openstack-magnum
python-magnum
openstack-magnum-common
openstack-magnum-conductor
openstack-magnum-api
openstack-magnum-doc
python-magnum-tests

# openstack-magnum-ui
openstack-magnum-ui

# openstack-ras
openstack-ras

# openstack-ceilometer
python-ceilometer
openstack-ceilometer-common
openstack-ceilometer-notification
openstack-ceilometer-ipmi
openstack-ceilometer-polling

# openstack-cinder
openstack-cinder

# openstack-glance
openstack-glance

# gnocchi
python-gnocchi
gnocchi-api
gnocchi-common
gnocchi-metricd
gnocchi-statsd

# python-gnocchiclient
python2-gnocchiclient

# openstack-heat
openstack-heat-common
openstack-heat-engine
openstack-heat-api
openstack-heat-api-cfn
openstack-heat-api-cloudwatch

# wrs-heat-templates
wrs-heat-templates

# python-django-horizon
python-django-horizon
openstack-dashboard

# openstack-keystone
openstack-keystone

# python-networking-bgpvpn
python2-networking-bgpvpn
python-networking-bgpvpn-dashboard
python-networking-bgpvpn-heat

# python-networking-sfc
python2-networking-sfc

# python-networking-odl
python-networking-odl

# openstack-neutron
openstack-neutron
python-neutron
openstack-neutron-common
openstack-neutron-ml2
openstack-neutron-openvswitch
openstack-neutron-sriov-nic-agent

# python-neutron-dynamic-routing
python2-neutron-dynamic-routing

# openstack-nova
openstack-nova-common
openstack-nova-compute
openstack-nova-network
openstack-nova-scheduler
openstack-nova-api
openstack-nova-conductor
openstack-nova-console
openstack-nova-cells
openstack-nova-cert
openstack-nova-novncproxy
openstack-nova-spicehtml5proxy
openstack-nova-serialproxy
openstack-nova-placement-api
python-nova

# python-novaclient
python2-novaclient

# distributedcloud
distributedcloud-dcmanager
distributedcloud-dcorch

# distributedcloud-client
distributedcloud-client-dcmanagerclient

# openstack-aodh
openstack-aodh-compat
openstack-aodh-api
openstack-aodh-evaluator
openstack-aodh-notifier
openstack-aodh-listener
openstack-aodh-expirer

# openstack-panko
python-panko
openstack-panko-api
openstack-panko-common

# rabbitmq-server
rabbitmq-server

# python-openstackclient
python2-openstackclient

# python-django-openstack-auth
python2-django-openstack-auth

# python-wsme
python2-wsme

# openstack-swift
openstack-swift-object
openstack-swift-account
openstack-swift-container
openstack-swift-proxy

@@ -1,5 +1,3 @@
-ceph
-ceph-manager
 openstack/openstack-aodh
 openstack/openstack-murano
 openstack/python-muranoclient
@@ -50,3 +48,4 @@ openstack/python-django-openstack-auth
 openstack/python-wsme
 openstack/distributedcloud
 openstack/distributedcloud-client
+openstack/openstack-swift

ceph-manager/.gitignore (vendored, 6 lines)
@@ -1,6 +0,0 @@
!.distro
.distro/centos7/rpmbuild/RPMS
.distro/centos7/rpmbuild/SRPMS
.distro/centos7/rpmbuild/BUILD
.distro/centos7/rpmbuild/BUILDROOT
.distro/centos7/rpmbuild/SOURCES/ceph-manager*tar.gz

@@ -1,202 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

@@ -1,13 +0,0 @@
Metadata-Version: 1.1
Name: ceph-manager
Version: 1.0
Summary: Handle Ceph API calls and provide status updates via alarms
Home-page:
Author: Windriver
Author-email: info@windriver.com
License: Apache-2.0

Description: Handle Ceph API calls and provide status updates via alarms


Platform: UNKNOWN

@@ -1,3 +0,0 @@
SRC_DIR="ceph-manager"
COPY_LIST_TO_TAR="files scripts"
TIS_PATCH_VER=4

@@ -1,70 +0,0 @@
Summary: Handle Ceph API calls and provide status updates via alarms
Name: ceph-manager
Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist}
License: Apache-2.0
Group: base
Packager: Wind River <info@windriver.com>
URL: unknown
Source0: %{name}-%{version}.tar.gz

BuildRequires: python-setuptools
BuildRequires: systemd-units
BuildRequires: systemd-devel
Requires: sysinv

%description
Handle Ceph API calls and provide status updates via alarms.
Handle sysinv RPC calls for long running Ceph API operations:
- cache tiering enable
- cache tiering disable

%define local_bindir /usr/bin/
%define local_etc_initd /etc/init.d/
%define local_etc_logrotated /etc/logrotate.d/
%define pythonroot /usr/lib64/python2.7/site-packages

%define debug_package %{nil}

%prep
%setup

%build
%{__python} setup.py build

%install
%{__python} setup.py install --root=$RPM_BUILD_ROOT \
                             --install-lib=%{pythonroot} \
                             --prefix=/usr \
                             --install-data=/usr/share \
                             --single-version-externally-managed

install -d -m 755 %{buildroot}%{local_etc_initd}
install -p -D -m 700 scripts/init.d/ceph-manager %{buildroot}%{local_etc_initd}/ceph-manager

install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/bin/ceph-manager %{buildroot}%{local_bindir}/ceph-manager

install -d -m 755 %{buildroot}%{local_etc_logrotated}
install -p -D -m 644 files/ceph-manager.logrotate %{buildroot}%{local_etc_logrotated}/ceph-manager.logrotate

install -d -m 755 %{buildroot}%{_unitdir}
install -m 644 -p -D files/%{name}.service %{buildroot}%{_unitdir}/%{name}.service

%clean
rm -rf $RPM_BUILD_ROOT

# Note: The package name is ceph-manager but the import name is ceph_manager,
# so we can't use '%{name}'.
%files
%defattr(-,root,root,-)
%doc LICENSE
%{local_bindir}/*
%{local_etc_initd}/*
%{_unitdir}/%{name}.service
%dir %{local_etc_logrotated}
%{local_etc_logrotated}/*
%dir %{pythonroot}/ceph_manager
%{pythonroot}/ceph_manager/*
%dir %{pythonroot}/ceph_manager-%{version}.0-py2.7.egg-info
%{pythonroot}/ceph_manager-%{version}.0-py2.7.egg-info/*

@@ -1,202 +0,0 @@
[Removed: a second copy of the Apache License, Version 2.0 text, identical to the license text in the previous deleted-file hunk.]

@@ -1,5 +0,0 @@
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -1,159 +0,0 @@
#
# Copyright (c) 2016-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import exception
from i18n import _LI
# noinspection PyUnresolvedReferences
from oslo_log import log as logging


LOG = logging.getLogger(__name__)


def osd_pool_set_quota(ceph_api, pool_name, max_bytes=0, max_objects=0):
    """Set the quota for an OSD pool_name

    Setting max_bytes or max_objects to 0 will disable that quota param
    :param pool_name: OSD pool_name
    :param max_bytes: maximum bytes for OSD pool_name
    :param max_objects: maximum objects for OSD pool_name
    """

    # Update quota if needed
    prev_quota = osd_pool_get_quota(ceph_api, pool_name)
    if prev_quota["max_bytes"] != max_bytes:
        resp, b = ceph_api.osd_set_pool_quota(pool_name, 'max_bytes',
                                              max_bytes, body='json')
        if resp.ok:
            LOG.info(_LI("Set OSD pool_name quota: "
                         "pool_name={}, max_bytes={}").format(
                             pool_name, max_bytes))
        else:
            e = exception.CephPoolSetQuotaFailure(
                pool=pool_name, name='max_bytes',
                value=max_bytes, reason=resp.reason)
            LOG.error(e)
            raise e
    if prev_quota["max_objects"] != max_objects:
        resp, b = ceph_api.osd_set_pool_quota(pool_name, 'max_objects',
                                              max_objects,
                                              body='json')
        if resp.ok:
            LOG.info(_LI("Set OSD pool_name quota: "
                         "pool_name={}, max_objects={}").format(
                             pool_name, max_objects))
        else:
            e = exception.CephPoolSetQuotaFailure(
                pool=pool_name, name='max_objects',
                value=max_objects, reason=resp.reason)
            LOG.error(e)
            raise e


def osd_pool_get_quota(ceph_api, pool_name):
    resp, quota = ceph_api.osd_get_pool_quota(pool_name, body='json')
    if not resp.ok:
        e = exception.CephPoolGetQuotaFailure(
            pool=pool_name, reason=resp.reason)
        LOG.error(e)
        raise e
    else:
        return {"max_objects": quota["output"]["quota_max_objects"],
                "max_bytes": quota["output"]["quota_max_bytes"]}


def osd_pool_exists(ceph_api, pool_name):
    response, body = ceph_api.osd_pool_get(
        pool_name, "pg_num", body='json')
    if response.ok:
        return True
    return False


def osd_pool_create(ceph_api, pool_name, pg_num, pgp_num):
    # ruleset 0: is the default ruleset if no crushmap is loaded or
    # the ruleset for the backing tier if loaded:
    # Name: storage_tier_ruleset
    ruleset = 0
    response, body = ceph_api.osd_pool_create(
        pool_name, pg_num, pgp_num, pool_type="replicated",
        ruleset=ruleset, body='json')
    if response.ok:
        LOG.info(_LI("Created OSD pool: "
                     "pool_name={}, pg_num={}, pgp_num={}, "
                     "pool_type=replicated, ruleset={}").format(
                         pool_name, pg_num, pgp_num, ruleset))
    else:
        e = exception.CephPoolCreateFailure(
            name=pool_name, reason=response.reason)
        LOG.error(e)
        raise e

    # Explicitly assign the ruleset to the pool on creation since it is
    # ignored in the create call
    response, body = ceph_api.osd_set_pool_param(
        pool_name, "crush_ruleset", ruleset, body='json')
    if response.ok:
        LOG.info(_LI("Assigned crush ruleset to OSD pool: "
                     "pool_name={}, ruleset={}").format(
                         pool_name, ruleset))
    else:
        e = exception.CephPoolRulesetFailure(
            name=pool_name, reason=response.reason)
        LOG.error(e)
        ceph_api.osd_pool_delete(
            pool_name, pool_name,
            sure='--yes-i-really-really-mean-it',
            body='json')
        raise e


def osd_pool_delete(ceph_api, pool_name):
    """Delete an osd pool

    :param pool_name: pool name
    """
    response, body = ceph_api.osd_pool_delete(
        pool_name, pool_name,
        sure='--yes-i-really-really-mean-it',
        body='json')
    if response.ok:
        LOG.info(_LI("Deleted OSD pool {}").format(pool_name))
    else:
        e = exception.CephPoolDeleteFailure(
            name=pool_name, reason=response.reason)
        LOG.warn(e)
        raise e


def osd_set_pool_param(ceph_api, pool_name, param, value):
    response, body = ceph_api.osd_set_pool_param(
        pool_name, param, value,
        force=None, body='json')
    if response.ok:
        LOG.info('OSD set pool param: '
                 'pool={}, name={}, value={}'.format(
                     pool_name, param, value))
    else:
        raise exception.CephPoolSetParamFailure(
            pool_name=pool_name,
            param=param,
            value=str(value),
            reason=response.reason)
    return response, body


def osd_get_pool_param(ceph_api, pool_name, param):
    response, body = ceph_api.osd_get_pool_param(
        pool_name, param, body='json')
    if response.ok:
        LOG.debug('OSD get pool param: '
                  'pool={}, name={}, value={}'.format(
                      pool_name, param, body['output'][param]))
    else:
        raise exception.CephPoolGetParamFailure(
            pool_name=pool_name,
            param=param,
            reason=response.reason)
    return body['output'][param]

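The helpers above are meant to compose: create a pool, then cap it. A minimal usage sketch, assuming a ceph_api client object exposing the same wrapper calls used in this module (osd_pool_get, osd_pool_create, osd_set_pool_quota); the provision_pool function and its parameter values are hypothetical illustration, not part of the original module.

    import ceph  # the module deleted in this hunk

    def provision_pool(ceph_api, name, size_gib):
        # Hypothetical example: create a replicated pool with 64 placement
        # groups unless it already exists, then cap its size in bytes.
        # Each helper raises a CephManagerException subclass on failure.
        if not ceph.osd_pool_exists(ceph_api, name):
            ceph.osd_pool_create(ceph_api, name, pg_num=64, pgp_num=64)
        ceph.osd_pool_set_quota(ceph_api, name,
                                max_bytes=size_gib * 1024 ** 3)
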
@@ -1,90 +0,0 @@
#
# Copyright (c) 2016-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from i18n import _
# noinspection PyUnresolvedReferences
from sysinv.common import constants as sysinv_constants


CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL = \
    sysinv_constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL
CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER = \
    sysinv_constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER
CEPH_POOLS = sysinv_constants.CEPH_POOLS
CEPH_REPLICATION_FACTOR = sysinv_constants.CEPH_REPLICATION_FACTOR_DEFAULT

# Cache flush parameters
CACHE_FLUSH_OBJECTS_THRESHOLD = 1000
CACHE_FLUSH_MIN_WAIT_OBJ_COUNT_DECREASE_SEC = 1
CACHE_FLUSH_MAX_WAIT_OBJ_COUNT_DECREASE_SEC = 128

FM_ALARM_REASON_MAX_SIZE = 256

# TODO: this will later change based on parsed health;
# clock skew is a VM malfunction, mon or osd is an equipment malfunction
ALARM_CAUSE = 'equipment-malfunction'
ALARM_TYPE = 'equipment'

# Ceph health check interval (in seconds)
CEPH_HEALTH_CHECK_INTERVAL = 60

# Ceph health statuses
CEPH_HEALTH_OK = 'HEALTH_OK'
CEPH_HEALTH_WARN = 'HEALTH_WARN'
CEPH_HEALTH_ERR = 'HEALTH_ERR'
CEPH_HEALTH_DOWN = 'CEPH_DOWN'

# Statuses not reported by Ceph
CEPH_STATUS_CUSTOM = [CEPH_HEALTH_DOWN]

SEVERITY = {CEPH_HEALTH_DOWN: 'critical',
            CEPH_HEALTH_ERR: 'critical',
            CEPH_HEALTH_WARN: 'warning'}

SERVICE_AFFECTING = {CEPH_HEALTH_DOWN: True,
                     CEPH_HEALTH_ERR: True,
                     CEPH_HEALTH_WARN: False}

# TODO: this will later change based on parsed health
ALARM_REASON_NO_OSD = _('no OSDs')
ALARM_REASON_OSDS_DOWN = _('OSDs are down')
ALARM_REASON_OSDS_OUT = _('OSDs are out')
ALARM_REASON_OSDS_DOWN_OUT = _('OSDs are down/out')
ALARM_REASON_PEER_HOST_DOWN = _('peer host down')

REPAIR_ACTION_MAJOR_CRITICAL_ALARM = _(
    'Ensure storage hosts from replication group are unlocked and available.'
    'Check if OSDs of each storage host are up and running.'
    'If problem persists, contact next level of support.')
REPAIR_ACTION = _('If problem persists, contact next level of support.')

SYSINV_CONDUCTOR_TOPIC = 'sysinv.conductor_manager'
CEPH_MANAGER_TOPIC = 'sysinv.ceph_manager'
SYSINV_CONFIG_FILE = '/etc/sysinv/sysinv.conf'

# Titanium Cloud version strings
TITANIUM_SERVER_VERSION_18_03 = '18.03'

CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET = (
    "all OSDs are running jewel or later but the "
    "'require_jewel_osds' osdmap flag is not set")

UPGRADE_COMPLETED = \
    sysinv_constants.UPGRADE_COMPLETED
UPGRADE_ABORTING = \
    sysinv_constants.UPGRADE_ABORTING
UPGRADE_ABORT_COMPLETING = \
    sysinv_constants.UPGRADE_ABORT_COMPLETING
UPGRADE_ABORTING_ROLLBACK = \
    sysinv_constants.UPGRADE_ABORTING_ROLLBACK

CEPH_FLAG_REQUIRE_JEWEL_OSDS = 'require_jewel_osds'

# Tiers
CEPH_CRUSH_TIER_SUFFIX = sysinv_constants.CEPH_CRUSH_TIER_SUFFIX
SB_TIER_TYPE_CEPH = sysinv_constants.SB_TIER_TYPE_CEPH
SB_TIER_SUPPORTED = sysinv_constants.SB_TIER_SUPPORTED
SB_TIER_DEFAULT_NAMES = sysinv_constants.SB_TIER_DEFAULT_NAMES
SB_TIER_CEPH_POOLS = sysinv_constants.SB_TIER_CEPH_POOLS

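A hedged sketch of how the SEVERITY and SERVICE_AFFECTING lookup tables above appear intended to be used when raising an alarm for a given Ceph health state; the variable names here are illustrative only.

    import constants

    health = constants.CEPH_HEALTH_WARN
    severity = constants.SEVERITY[health]                    # 'warning'
    service_affecting = constants.SERVICE_AFFECTING[health]  # False
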
@@ -1,78 +0,0 @@
#
# Copyright (c) 2016-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# noinspection PyUnresolvedReferences
from i18n import _, _LW
# noinspection PyUnresolvedReferences
from oslo_log import log as logging


LOG = logging.getLogger(__name__)


class CephManagerException(Exception):
    message = _("An unknown exception occurred.")

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        if not message:
            try:
                message = self.message % kwargs
            except TypeError:
                LOG.warn(_LW('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.message
        super(CephManagerException, self).__init__(message)


class CephPoolSetQuotaFailure(CephManagerException):
    message = _("Error setting the OSD pool "
                "quota %(name)s for %(pool)s to %(value)s") \
        + ": %(reason)s"


class CephPoolGetQuotaFailure(CephManagerException):
    message = _("Error getting the OSD pool quota for %(pool)s") \
        + ": %(reason)s"


class CephPoolCreateFailure(CephManagerException):
    message = _("Creating OSD pool %(name)s failed: %(reason)s")


class CephPoolDeleteFailure(CephManagerException):
    message = _("Deleting OSD pool %(name)s failed: %(reason)s")


class CephPoolRulesetFailure(CephManagerException):
    message = _("Assigning crush ruleset to OSD "
                "pool %(name)s failed: %(reason)s")


class CephPoolSetParamFailure(CephManagerException):
    message = _("Cannot set Ceph OSD pool parameter: "
                "pool_name=%(pool_name)s, param=%(param)s, value=%(value)s. "
                "Reason: %(reason)s")


class CephPoolGetParamFailure(CephManagerException):
    message = _("Cannot get Ceph OSD pool parameter: "
                "pool_name=%(pool_name)s, param=%(param)s. "
                "Reason: %(reason)s")


class CephSetKeyFailure(CephManagerException):
    message = _("Error setting the Ceph flag "
                "'%(flag)s' %(extra)s: "
                "response=%(response_status_code)s:%(response_reason)s, "
                "status=%(status)s, output=%(output)s")


class CephApiFailure(CephManagerException):
    message = _("API failure: "
                "call=%(call)s, reason=%(reason)s")

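For illustration, a hedged example (not from this commit) of how the base class above interpolates keyword arguments into the class-level format string:

    from exception import CephPoolCreateFailure

    e = CephPoolCreateFailure(name='cinder-volumes', reason='pg_num too high')
    print(e)  # Creating OSD pool cinder-volumes failed: pg_num too high
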
@@ -1,15 +0,0 @@
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import oslo_i18n

DOMAIN = 'ceph-manager'

_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
_ = _translators.primary

_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error

@ -1,874 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2013-2018 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
import time
|
||||
|
||||
# noinspection PyUnresolvedReferences
|
||||
from fm_api import fm_api
|
||||
# noinspection PyUnresolvedReferences
|
||||
from fm_api import constants as fm_constants
|
||||
# noinspection PyUnresolvedReferences
|
||||
from oslo_log import log as logging
|
||||
|
||||
# noinspection PyProtectedMember
|
||||
from i18n import _, _LI, _LW, _LE
|
||||
|
||||
import constants
|
||||
import exception
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# In 18.03 R5, ceph cache tiering was disabled and prevented from being
|
||||
# re-enabled. When upgrading from 18.03 (R5) to R6 we need to remove the
|
||||
# cache-tier from the crushmap ceph-cache-tiering
|
||||
#
|
||||
# This class is needed only when upgrading from R5 to R6
|
||||
# TODO: remove it after 1st R6 release
|
||||
#
|
||||
class HandleUpgradesMixin(object):
|
||||
|
||||
def __init__(self, service):
|
||||
self.service = service
|
||||
self.wait_for_upgrade_complete = False
|
||||
|
||||
def setup(self, config):
|
||||
self._set_upgrade(self.service.retry_get_software_upgrade_status())
|
||||
|
||||
def _set_upgrade(self, upgrade):
|
||||
state = upgrade.get('state')
|
||||
from_version = upgrade.get('from_version')
|
||||
if (state
|
||||
and state != constants.UPGRADE_COMPLETED
|
||||
and from_version == constants.TITANIUM_SERVER_VERSION_18_03):
|
||||
|
||||
LOG.info(_LI("Wait for ceph upgrade to complete before monitoring cluster."))
|
||||
self.wait_for_upgrade_complete = True
|
||||
|
||||
def set_flag_require_jewel_osds(self):
|
||||
try:
|
||||
response, body = self.service.ceph_api.osd_set_key(
|
||||
constants.CEPH_FLAG_REQUIRE_JEWEL_OSDS,
|
||||
body='json')
|
||||
LOG.info(_LI("Set require_jewel_osds flag"))
|
||||
except IOError as e:
|
||||
raise exception.CephApiFailure(
|
||||
call="osd_set_key",
|
||||
reason=e.message)
|
||||
else:
|
||||
if not response.ok:
|
||||
raise exception.CephSetKeyFailure(
|
||||
flag=constants.CEPH_FLAG_REQUIRE_JEWEL_OSDS,
|
||||
extra=_("needed to complete upgrade to Jewel"),
|
||||
response_status_code=response.status_code,
|
||||
response_reason=response.reason,
|
||||
status=body.get('status'),
|
||||
output=body.get('output'))
|
||||
|
||||
def filter_health_status(self, health):
|
||||
health = self.auto_heal(health)
|
||||
# filter out require_jewel_osds warning
|
||||
#
|
||||
if not self.wait_for_upgrade_complete:
|
||||
return health
|
||||
if health['health'] != constants.CEPH_HEALTH_WARN:
|
||||
return health
|
||||
if (constants.CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET
|
||||
not in health['detail']):
|
||||
return health
|
||||
return self._remove_require_jewel_osds_warning(health)
|
||||
|
||||
def _remove_require_jewel_osds_warning(self, health):
|
||||
reasons_list = []
|
||||
for reason in health['detail'].split(';'):
|
||||
reason = reason.strip()
|
||||
if len(reason) == 0:
|
||||
continue
|
||||
if constants.CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET in reason:
|
||||
continue
|
||||
reasons_list.append(reason)
|
||||
if len(reasons_list) == 0:
|
||||
health = {
|
||||
'health': constants.CEPH_HEALTH_OK,
|
||||
'detail': ''}
|
||||
else:
|
||||
health['detail'] = '; '.join(reasons_list)
|
||||
return health
|
||||
|
||||
def auto_heal(self, health):
|
||||
if (health['health'] == constants.CEPH_HEALTH_WARN
|
||||
and (constants.CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET
|
||||
in health['detail'])):
|
||||
try:
|
||||
upgrade = self.service.get_software_upgrade_status()
|
||||
except Exception as ex:
|
||||
LOG.warn(_LW(
|
||||
"Getting software upgrade status failed "
|
||||
"with: %s. Skip auto-heal attempt "
|
||||
"(will retry on next ceph status poll).") % str(ex))
|
||||
return health
|
||||
state = upgrade.get('state')
|
||||
# surpress require_jewel_osds in case upgrade is
|
||||
# in progress but not completed or aborting
|
||||
if (not self.wait_for_upgrade_complete
|
||||
and (upgrade.get('from_version')
|
||||
== constants.TITANIUM_SERVER_VERSION_18_03)
|
||||
and state not in [
|
||||
None,
|
||||
constants.UPGRADE_COMPLETED,
|
||||
constants.UPGRADE_ABORTING,
|
||||
constants.UPGRADE_ABORT_COMPLETING,
|
||||
constants.UPGRADE_ABORTING_ROLLBACK]):
|
||||
self.wait_for_upgrade_complete = True
|
||||
# set require_jewel_osds in case upgrade is
|
||||
# not in progress or completed
|
||||
if (state in [None, constants.UPGRADE_COMPLETED]):
|
||||
LOG.warn(_LW(
|
||||
"No upgrade in progress or update completed "
|
||||
"and require_jewel_osds health warning raised. "
|
||||
"Set require_jewel_osds flag."))
|
||||
self.set_flag_require_jewel_osds()
|
||||
health = self._remove_require_jewel_osds_warning(health)
|
||||
LOG.info(_LI("Unsurpress require_jewel_osds health warning"))
|
||||
self.wait_for_upgrade_complete = False
|
||||
# unsurpress require_jewel_osds in case upgrade
|
||||
# is aborting
|
||||
if (state in [
|
||||
constants.UPGRADE_ABORTING,
|
||||
constants.UPGRADE_ABORT_COMPLETING,
|
||||
constants.UPGRADE_ABORTING_ROLLBACK]):
|
||||
self.wait_for_upgrade_complete = False
|
||||
return health
|
||||
|
||||
|
||||
class Monitor(HandleUpgradesMixin):
|
||||
|
||||
def __init__(self, service):
|
||||
self.service = service
|
||||
self.current_ceph_health = ""
|
||||
self.tiers_size = {}
|
||||
self.known_object_pool_name = None
|
||||
self.primary_tier_name = constants.SB_TIER_DEFAULT_NAMES[
|
||||
constants.SB_TIER_TYPE_CEPH] + constants.CEPH_CRUSH_TIER_SUFFIX
|
||||
self.cluster_is_up = False
|
||||
super(Monitor, self).__init__(service)
|
||||
|
||||
def setup(self, config):
|
||||
super(Monitor, self).setup(config)
|
||||
|
||||
def run(self):
|
||||
# Wait until Ceph cluster is up and we can get the fsid
|
||||
while True:
|
||||
try:
|
||||
self.ceph_get_fsid()
|
||||
except Exception:
|
||||
LOG.exception("Error getting fsid, "
|
||||
"will retry in %ss" % constants.CEPH_HEALTH_CHECK_INTERVAL)
|
||||
if self.service.entity_instance_id:
|
||||
break
|
||||
time.sleep(constants.CEPH_HEALTH_CHECK_INTERVAL)
|
||||
|
||||
# Start monitoring ceph status
|
||||
while True:
|
||||
try:
|
||||
self.ceph_poll_status()
|
||||
self.ceph_poll_quotas()
|
||||
except Exception:
|
||||
LOG.exception("Error running periodic monitoring of ceph status, "
|
||||
"will retry in %ss" % constants.CEPH_HEALTH_CHECK_INTERVAL)
|
||||
time.sleep(constants.CEPH_HEALTH_CHECK_INTERVAL)
|
||||
|
||||
def ceph_get_fsid(self):
|
||||
# Check whether an alarm has already been raised
|
||||
self._get_current_alarms()
|
||||
if self.current_health_alarm:
|
||||
LOG.info(_LI("Current alarm: %s") %
|
||||
str(self.current_health_alarm.__dict__))
|
||||
|
||||
fsid = self._get_fsid()
|
||||
if not fsid:
|
||||
# Raise alarm - it will not have an entity_instance_id
|
||||
self._report_fault({'health': constants.CEPH_HEALTH_DOWN,
|
||||
'detail': 'Ceph cluster is down.'},
|
||||
fm_constants.FM_ALARM_ID_STORAGE_CEPH)
|
||||
else:
|
||||
# Clear alarm with no entity_instance_id
|
||||
self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH)
|
||||
self.service.entity_instance_id = 'cluster=%s' % fsid
|
||||
|
||||
def ceph_poll_status(self):
|
||||
# get previous data every time in case:
|
||||
# * daemon restarted
|
||||
# * alarm was cleared manually but stored as raised in daemon
|
||||
self._get_current_alarms()
|
||||
if self.current_health_alarm:
|
||||
LOG.info(_LI("Current alarm: %s") %
|
||||
str(self.current_health_alarm.__dict__))
|
||||
|
||||
# get ceph health
|
||||
health = self._get_health()
|
||||
LOG.info(_LI("Current Ceph health: "
|
||||
"%(health)s detail: %(detail)s") % health)
|
||||
|
||||
health = self.filter_health_status(health)
|
||||
if health['health'] != constants.CEPH_HEALTH_OK:
|
||||
self._report_fault(health, fm_constants.FM_ALARM_ID_STORAGE_CEPH)
|
||||
self._report_alarm_osds_health()
|
||||
else:
|
||||
self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH)
|
||||
self.clear_all_major_critical()
|
||||
|
||||
def filter_health_status(self, health):
|
||||
return super(Monitor, self).filter_health_status(health)
|
||||
|
||||
def ceph_poll_quotas(self):
|
||||
self._get_current_alarms()
|
||||
if self.current_quota_alarms:
|
||||
LOG.info(_LI("Current quota alarms %s") %
|
||||
self.current_quota_alarms)
|
||||
|
||||
# Get current current size of each tier
|
||||
previous_tiers_size = self.tiers_size
|
||||
self.tiers_size = self._get_tiers_size()
|
||||
|
||||
# Make sure any removed tiers have the alarms cleared
|
||||
for t in (set(previous_tiers_size)-set(self.tiers_size)):
|
||||
self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE,
|
||||
"{0}.tier={1}".format(
|
||||
self.service.entity_instance_id,
|
||||
t[:-len(constants.CEPH_CRUSH_TIER_SUFFIX)]))
|
||||
|
||||
# Check the quotas on each tier
|
||||
for tier in self.tiers_size:
|
||||
# Extract the tier name from the crush equivalent
|
||||
tier_name = tier[:-len(constants.CEPH_CRUSH_TIER_SUFFIX)]
|
||||
|
||||
if self.tiers_size[tier] == 0:
|
||||
LOG.info(_LI("'%s' tier cluster size not yet available")
|
||||
% tier_name)
|
||||
continue
|
||||
|
||||
pools_quota_sum = 0
|
||||
if tier == self.primary_tier_name:
|
||||
for pool in constants.CEPH_POOLS:
|
||||
if (pool['pool_name'] ==
|
||||
constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL or
|
||||
pool['pool_name'] ==
|
||||
constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
|
||||
object_pool_name = self._get_object_pool_name()
|
||||
if object_pool_name is None:
|
||||
LOG.error("Rados gateway object data pool does "
|
||||
"not exist.")
|
||||
else:
|
||||
pools_quota_sum += \
|
||||
self._get_osd_pool_quota(object_pool_name)
|
||||
else:
|
||||
pools_quota_sum += self._get_osd_pool_quota(
|
||||
pool['pool_name'])
|
||||
else:
|
||||
for pool in constants.SB_TIER_CEPH_POOLS:
|
||||
pool_name = "{0}-{1}".format(pool['pool_name'], tier_name)
|
||||
pools_quota_sum += self._get_osd_pool_quota(pool_name)
|
||||
|
||||
# Currently, there is only one pool on the addtional tier(s),
|
||||
# therefore allow a quota of 0
|
||||
if (pools_quota_sum != self.tiers_size[tier] and
|
||||
pools_quota_sum != 0):
|
||||
self._report_fault(
|
||||
{'tier_name': tier_name,
|
||||
'tier_eid': "{0}.tier={1}".format(
|
||||
self.service.entity_instance_id,
|
||||
tier_name)},
|
||||
fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE)
|
||||
else:
|
||||
self._clear_fault(
|
||||
fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE,
|
||||
"{0}.tier={1}".format(self.service.entity_instance_id,
|
||||
tier_name))
|
||||
|
||||
# CEPH HELPERS
|
||||
|
||||
def _get_fsid(self):
|
||||
try:
|
||||
response, fsid = self.service.ceph_api.fsid(
|
||||
body='text', timeout=30)
|
||||
except IOError as e:
|
||||
LOG.warning(_LW("ceph_api.fsid failed: %s") % str(e.message))
|
||||
self.cluster_is_up = False
|
||||
return None
|
||||
|
||||
if not response.ok:
|
||||
LOG.warning(_LW("Get fsid failed: %s") % response.reason)
|
||||
self.cluster_is_up = False
|
||||
return None
|
||||
|
||||
self.cluster_is_up = True
|
||||
return fsid.strip()
|
||||
|
||||
def _get_health(self):
|
||||
try:
|
||||
# we use text since it has all info
|
||||
response, body = self.service.ceph_api.health(
|
||||
body='text', timeout=30)
|
||||
except IOError as e:
|
||||
LOG.warning(_LW("ceph_api.health failed: %s") % str(e.message))
|
||||
self.cluster_is_up = False
|
||||
return {'health': constants.CEPH_HEALTH_DOWN,
|
||||
'detail': 'Ceph cluster is down.'}
|
||||
|
||||
if not response.ok:
|
||||
LOG.warning(_LW("CEPH health check failed: %s") % response.reason)
|
||||
health_info = [constants.CEPH_HEALTH_DOWN, response.reason]
|
||||
self.cluster_is_up = False
|
||||
else:
|
||||
health_info = body.split(' ', 1)
|
||||
self.cluster_is_up = True
|
||||
|
||||
health = health_info[0]
|
||||
|
||||
if len(health_info) > 1:
|
||||
detail = health_info[1]
|
||||
else:
|
||||
detail = health_info[0]
|
||||
|
||||
return {'health': health.strip(),
|
||||
'detail': detail.strip()}
|
||||
|
||||
    def _get_object_pool_name(self):
        if self.known_object_pool_name is None:
            response, body = self.service.ceph_api.osd_pool_get(
                constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL,
                "pg_num",
                body='json')

            if response.ok:
                self.known_object_pool_name = \
                    constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL
                return self.known_object_pool_name

            response, body = self.service.ceph_api.osd_pool_get(
                constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER,
                "pg_num",
                body='json')

            if response.ok:
                self.known_object_pool_name = \
                    constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER
                return self.known_object_pool_name

        return self.known_object_pool_name

    def _get_osd_pool_quota(self, pool_name):
        try:
            resp, quota = self.service.ceph_api.osd_get_pool_quota(
                pool_name, body='json')
        except IOError:
            return 0

        if not resp.ok:
            LOG.error(_LE("Getting the quota for "
                          "%(name)s pool failed: %(reason)s") %
                      {"name": pool_name, "reason": resp.reason})
            return 0
        else:
            try:
                quota_gib = int(quota["output"]["quota_max_bytes"]) / (1024 ** 3)
                return quota_gib
            except IOError:
                return 0

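Note: a worked example of the quota conversion in _get_osd_pool_quota(), with an illustrative JSON body (ceph reports quota_max_bytes in bytes):

# Illustrative: a 500 GiB quota as ceph would report it.
quota = {"output": {"quota_max_bytes": 536870912000}}
quota_gib = int(quota["output"]["quota_max_bytes"]) / (1024 ** 3)
# 536870912000 / 1073741824 = 500 (exact; integer division under Python 2)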
    # We have two root nodes, 'cache-tier' and 'storage-tier'.
    # To calculate the space that is used by the pools, we must only
    # use 'storage-tier'.
    # This function determines if a certain node is under a certain tree.
    def host_is_in_root(self, search_tree, node, root_name):
        if node['type'] == 'root':
            if node['name'] == root_name:
                return True
            else:
                return False
        return self.host_is_in_root(search_tree,
                                    search_tree[node['parent']],
                                    root_name)

    # The information received from ceph is not properly
    # structured for efficient parsing and searching, so
    # it must be processed and transformed into a more
    # structured form.
    #
    # Input received from ceph is an array of nodes with the
    # following structure:
    #   [{'id': <node_id>, 'children': <array_of_children_ids>, ...},
    #    ...]
    #
    # We process this array and transform it into a dictionary
    # (for efficient access). The transformed "search tree" is a
    # dictionary with the following structure:
    #   {<node_id>: {'children': <array_of_children_ids>}}
    def _get_tiers_size(self):
        try:
            resp, body = self.service.ceph_api.osd_df(
                body='json',
                output_method='tree')
        except IOError:
            return 0
        if not resp.ok:
            LOG.error(_LE("Getting the cluster usage "
                          "information failed: %(reason)s - "
                          "%(body)s") % {"reason": resp.reason,
                                         "body": body})
            return {}

        # A node is a crushmap element: root, chassis, host, osd. Create a
        # dictionary of the nodes keyed by id for efficient searching
        # through nodes.
        #
        # For example: storage-0's node has one child node => OSD 0
        # {
        #     "id": -4,
        #     "name": "storage-0",
        #     "type": "host",
        #     "type_id": 1,
        #     "reweight": -1.000000,
        #     "kb": 51354096,
        #     "kb_used": 1510348,
        #     "kb_avail": 49843748,
        #     "utilization": 2.941047,
        #     "var": 1.480470,
        #     "pgs": 0,
        #     "children": [0]
        # },
        search_tree = {}
        for node in body['output']['nodes']:
            search_tree[node['id']] = node

        # Extract the tiers as we will return a dict with the size of each tier
        tiers = {k: v for k, v in search_tree.items() if v['type'] == 'root'}

        # For each tier, traverse the hierarchy from root -> chassis -> host.
        # Sum the host sizes to determine the overall size of the tier.
        tier_sizes = {}
        for tier in tiers.values():
            tier_size = 0
            for chassis_id in tier['children']:
                chassis_size = 0
                chassis = search_tree[chassis_id]
                for host_id in chassis['children']:
                    host = search_tree[host_id]
                    if (chassis_size == 0 or
                            chassis_size > host['kb']):
                        chassis_size = host['kb']
                tier_size += chassis_size / (1024 ** 2)
            tier_sizes[tier['name']] = tier_size

        return tier_sizes

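Note: a toy input for _get_tiers_size() showing how the traversal sizes a tier; all ids and numbers are invented:

# Illustrative nodes, keyed by id the way search_tree is built above.
search_tree = {
    -1: {'id': -1, 'name': 'storage-tier', 'type': 'root', 'children': [-2]},
    -2: {'id': -2, 'name': 'group-0', 'type': 'chassis', 'children': [-3, -4]},
    -3: {'id': -3, 'name': 'storage-0', 'type': 'host', 'kb': 51354096},
    -4: {'id': -4, 'name': 'storage-1', 'type': 'host', 'kb': 48000000},
}
# Each chassis contributes the size of its smallest host (replicated data),
# so the tier size is min(51354096, 48000000) / 1024**2 = 45 GiB
# (integer division under Python 2).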
    # ALARM HELPERS

    @staticmethod
    def _check_storage_group(osd_tree, group_id,
                             hosts, osds, fn_report_alarm):
        reasons = set()
        degraded_hosts = set()
        severity = fm_constants.FM_ALARM_SEVERITY_CRITICAL
        for host_id in hosts:
            if len(osds[host_id]) == 0:
                reasons.add(constants.ALARM_REASON_NO_OSD)
                degraded_hosts.add(host_id)
            else:
                for osd_id in osds[host_id]:
                    if osd_tree[osd_id]['status'] == 'up':
                        if osd_tree[osd_id]['reweight'] == 0.0:
                            reasons.add(constants.ALARM_REASON_OSDS_OUT)
                            degraded_hosts.add(host_id)
                        else:
                            severity = fm_constants.FM_ALARM_SEVERITY_MAJOR
                    elif osd_tree[osd_id]['status'] == 'down':
                        reasons.add(constants.ALARM_REASON_OSDS_DOWN)
                        degraded_hosts.add(host_id)
        if constants.ALARM_REASON_OSDS_OUT in reasons \
                and constants.ALARM_REASON_OSDS_DOWN in reasons:
            reasons.add(constants.ALARM_REASON_OSDS_DOWN_OUT)
            reasons.remove(constants.ALARM_REASON_OSDS_OUT)
        if constants.ALARM_REASON_OSDS_DOWN in reasons \
                and constants.ALARM_REASON_OSDS_DOWN_OUT in reasons:
            reasons.remove(constants.ALARM_REASON_OSDS_DOWN)
        reason = "/".join(list(reasons))
        if severity == fm_constants.FM_ALARM_SEVERITY_CRITICAL:
            reason = "{} {}: {}".format(
                fm_constants.ALARM_CRITICAL_REPLICATION,
                osd_tree[group_id]['name'],
                reason)
        elif severity == fm_constants.FM_ALARM_SEVERITY_MAJOR:
            reason = "{} {}: {}".format(
                fm_constants.ALARM_MAJOR_REPLICATION,
                osd_tree[group_id]['name'],
                reason)
        if len(degraded_hosts) == 0:
            if len(hosts) < 2:
                fn_report_alarm(
                    osd_tree[group_id]['name'],
                    "{} {}: {}".format(
                        fm_constants.ALARM_MAJOR_REPLICATION,
                        osd_tree[group_id]['name'],
                        constants.ALARM_REASON_PEER_HOST_DOWN),
                    fm_constants.FM_ALARM_SEVERITY_MAJOR)
        elif len(degraded_hosts) == 1:
            fn_report_alarm(
                "{}.host={}".format(
                    osd_tree[group_id]['name'],
                    osd_tree[list(degraded_hosts)[0]]['name']),
                reason, severity)
        else:
            fn_report_alarm(
                osd_tree[group_id]['name'],
                reason, severity)

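Note: a stubbed scenario for _check_storage_group() illustrating the severity demotion; the tree values are invented:

# Illustrative: storage-0's only OSD is down, storage-1's is up and in.
osd_tree = {
    -2: {'name': 'group-0'},
    -3: {'name': 'storage-0'},
    -4: {'name': 'storage-1'},
    0: {'status': 'down', 'reweight': 1.0},
    1: {'status': 'up', 'reweight': 1.0},
}
hosts = [-3, -4]
osds = {-3: [0], -4: [1]}
# The 'up' OSD on storage-1 demotes severity from CRITICAL to MAJOR, and with
# exactly one degraded host the alarm entity becomes "group-0.host=storage-0".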
    def _check_storage_tier(self, osd_tree, tier_name, fn_report_alarm):
        for tier_id in osd_tree:
            if osd_tree[tier_id]['type'] != 'root':
                continue
            if osd_tree[tier_id]['name'] != tier_name:
                continue
            for group_id in osd_tree[tier_id]['children']:
                if osd_tree[group_id]['type'] != 'chassis':
                    continue
                if not osd_tree[group_id]['name'].startswith('group-'):
                    continue
                hosts = []
                osds = {}
                for host_id in osd_tree[group_id]['children']:
                    if osd_tree[host_id]['type'] != 'host':
                        continue
                    hosts.append(host_id)
                    osds[host_id] = []
                    for osd_id in osd_tree[host_id]['children']:
                        if osd_tree[osd_id]['type'] == 'osd':
                            osds[host_id].append(osd_id)
                self._check_storage_group(osd_tree, group_id, hosts,
                                          osds, fn_report_alarm)
            break

    def _current_health_alarm_equals(self, reason, severity):
        if not self.current_health_alarm:
            return False
        if getattr(self.current_health_alarm, 'severity', None) != severity:
            return False
        if getattr(self.current_health_alarm, 'reason_text', None) != reason:
            return False
        return True

    def _report_alarm_osds_health(self):
        response, osd_tree = self.service.ceph_api.osd_tree(body='json')
        if not response.ok:
            LOG.error(_LE("Failed to retrieve Ceph OSD tree: "
                          "status_code: %(status_code)s, reason: %(reason)s") %
                      {"status_code": response.status_code,
                       "reason": response.reason})
            return
        osd_tree = dict([(n['id'], n) for n in osd_tree['output']['nodes']])
        alarms = []

        self._check_storage_tier(osd_tree, "storage-tier",
                                 lambda *args: alarms.append(args))

        old_alarms = {}
        for alarm_id in [
                fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR,
                fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL]:
            alarm_list = self.service.fm_api.get_faults_by_id(alarm_id)
            if not alarm_list:
                continue
            for alarm in alarm_list:
                if alarm.entity_instance_id not in old_alarms:
                    old_alarms[alarm.entity_instance_id] = []
                old_alarms[alarm.entity_instance_id].append(
                    (alarm.alarm_id, alarm.reason_text))

        for peer_group, reason, severity in alarms:
            if self._current_health_alarm_equals(reason, severity):
                continue
            alarm_critical_major = fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR
            if severity == fm_constants.FM_ALARM_SEVERITY_CRITICAL:
                alarm_critical_major = (
                    fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL)
            entity_instance_id = (
                self.service.entity_instance_id + '.peergroup=' + peer_group)
            alarm_already_exists = False
            if entity_instance_id in old_alarms:
                for alarm_id, old_reason in old_alarms[entity_instance_id]:
                    if (reason == old_reason and
                            alarm_id == alarm_critical_major):
                        # if the alarm is exactly the same, we don't need
                        # to recreate it
                        old_alarms[entity_instance_id].remove(
                            (alarm_id, old_reason))
                        alarm_already_exists = True
                    elif (alarm_id == alarm_critical_major):
                        # if only the reason changed, remove the alarm from
                        # the list so we don't clear it at the end of the
                        # function
                        old_alarms[entity_instance_id].remove(
                            (alarm_id, old_reason))

                if (len(old_alarms[entity_instance_id]) == 0):
                    del old_alarms[entity_instance_id]

            # in case the alarm is exactly the same, we skip the alarm set
            if alarm_already_exists is True:
                continue
            major_repair_action = constants.REPAIR_ACTION_MAJOR_CRITICAL_ALARM
            fault = fm_api.Fault(
                alarm_id=alarm_critical_major,
                alarm_type=fm_constants.FM_ALARM_TYPE_4,
                alarm_state=fm_constants.FM_ALARM_STATE_SET,
                entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER,
                entity_instance_id=entity_instance_id,
                severity=severity,
                reason_text=reason,
                probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_15,
                proposed_repair_action=major_repair_action,
                service_affecting=constants.SERVICE_AFFECTING['HEALTH_WARN'])
            alarm_uuid = self.service.fm_api.set_fault(fault)
            if alarm_uuid:
                LOG.info(_LI(
                    "Created storage alarm %(alarm_uuid)s - "
                    "severity: %(severity)s, reason: %(reason)s, "
                    "service_affecting: %(service_affecting)s") % {
                    "alarm_uuid": str(alarm_uuid),
                    "severity": str(severity),
                    "reason": reason,
                    "service_affecting": str(
                        constants.SERVICE_AFFECTING['HEALTH_WARN'])})
            else:
                LOG.error(_LE(
                    "Failed to create storage alarm - "
                    "severity: %(severity)s, reason: %(reason)s, "
                    "service_affecting: %(service_affecting)s") % {
                    "severity": str(severity),
                    "reason": reason,
                    "service_affecting": str(
                        constants.SERVICE_AFFECTING['HEALTH_WARN'])})

        for entity_instance_id in old_alarms:
            for alarm_id, old_reason in old_alarms[entity_instance_id]:
                self.service.fm_api.clear_fault(alarm_id, entity_instance_id)

    @staticmethod
    def _parse_reason(health):
        """Parse reason strings received from Ceph."""
        if health['health'] in constants.CEPH_STATUS_CUSTOM:
            # Don't parse reason messages that we added ourselves
            return "Storage Alarm Condition: %(health)s. %(detail)s" % health

        reasons_lst = health['detail'].split(';')

        parsed_reasons_text = ""

        # Check if PGs have issues - we can't safely store the entire message
        # as it tends to be long
        for reason in reasons_lst:
            if "pgs" in reason:
                parsed_reasons_text += "PGs are degraded/stuck or undersized"
                break

        # Extract the recovery status
        parsed_reasons = [r.strip() for r in reasons_lst if 'recovery' in r]
        if parsed_reasons:
            parsed_reasons_text += ";" + ";".join(parsed_reasons)

        # We need to keep the most important parts of the messages when
        # storing them to fm alarms, therefore the text between [] brackets
        # is truncated if the max size is reached.

        # Add brackets, if needed
        if len(parsed_reasons_text):
            lbracket = " ["
            rbracket = "]"
        else:
            lbracket = ""
            rbracket = ""

        msg = {"head": "Storage Alarm Condition: ",
               "tail": ". Please check 'ceph -s' for more details."}
        max_size = constants.FM_ALARM_REASON_MAX_SIZE - \
            len(msg["head"]) - len(msg["tail"])

        return (
            msg['head'] +
            (health['health'] + lbracket + parsed_reasons_text)[:max_size - 1] +
            rbracket + msg['tail'])

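Note: the truncation arithmetic in _parse_reason(), worked with an assumed FM_ALARM_REASON_MAX_SIZE of 255 characters (the real value lives in constants):

head = "Storage Alarm Condition: "
tail = ". Please check 'ceph -s' for more details."
max_size = 255 - len(head) - len(tail)  # 255 - 25 - 42 = 188
# The health token plus the bracketed summary is clipped to max_size - 1
# characters, so head + body + ']' + tail never overflows the alarm field.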
    def _report_fault(self, health, alarm_id):
        if alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH:
            new_severity = constants.SEVERITY[health['health']]
            new_reason_text = self._parse_reason(health)
            new_service_affecting = \
                constants.SERVICE_AFFECTING[health['health']]

            # Raise or update the alarm if necessary
            if ((not self.current_health_alarm) or
                    (self.current_health_alarm.__dict__['severity'] !=
                        new_severity) or
                    (self.current_health_alarm.__dict__['reason_text'] !=
                        new_reason_text) or
                    (self.current_health_alarm.__dict__['service_affecting'] !=
                        str(new_service_affecting))):

                fault = fm_api.Fault(
                    alarm_id=fm_constants.FM_ALARM_ID_STORAGE_CEPH,
                    alarm_type=fm_constants.FM_ALARM_TYPE_4,
                    alarm_state=fm_constants.FM_ALARM_STATE_SET,
                    entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER,
                    entity_instance_id=self.service.entity_instance_id,
                    severity=new_severity,
                    reason_text=new_reason_text,
                    probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_15,
                    proposed_repair_action=constants.REPAIR_ACTION,
                    service_affecting=new_service_affecting)

                alarm_uuid = self.service.fm_api.set_fault(fault)
                if alarm_uuid:
                    LOG.info(_LI(
                        "Created storage alarm %(alarm_uuid)s - "
                        "severity: %(severity)s, reason: %(reason)s, "
                        "service_affecting: %(service_affecting)s") % {
                        "alarm_uuid": alarm_uuid,
                        "severity": new_severity,
                        "reason": new_reason_text,
                        "service_affecting": new_service_affecting})
                else:
                    LOG.error(_LE(
                        "Failed to create storage alarm - "
                        "severity: %(severity)s, reason: %(reason)s, "
                        "service_affecting: %(service_affecting)s") % {
                        "severity": new_severity,
                        "reason": new_reason_text,
                        "service_affecting": new_service_affecting})

            # Log the detailed reason for later analysis
            if (self.current_ceph_health != health['health'] or
                    self.detailed_health_reason != health['detail']):
                LOG.info(_LI("Ceph status changed: %(health)s "
                             "detailed reason: %(detail)s") % health)
                self.current_ceph_health = health['health']
                self.detailed_health_reason = health['detail']

        elif (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE and
              health['tier_eid'] not in self.current_quota_alarms):

            quota_reason_text = ("Quota/Space mismatch for the %s tier. The "
                                 "sum of Ceph pool quotas does not match the "
                                 "tier size." % health['tier_name'])
            fault = fm_api.Fault(
                alarm_id=fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE,
                alarm_state=fm_constants.FM_ALARM_STATE_SET,
                entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER,
                entity_instance_id=health['tier_eid'],
                severity=fm_constants.FM_ALARM_SEVERITY_MINOR,
                reason_text=quota_reason_text,
                alarm_type=fm_constants.FM_ALARM_TYPE_7,
                probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_75,
                proposed_repair_action=(
                    "Update ceph storage pool quotas to use all available "
                    "cluster space for the %s tier." % health['tier_name']),
                service_affecting=False)

            alarm_uuid = self.service.fm_api.set_fault(fault)
            if alarm_uuid:
                LOG.info(_LI(
                    "Created storage quota storage alarm %(alarm_uuid)s. "
                    "Reason: %(reason)s") % {
                    "alarm_uuid": alarm_uuid, "reason": quota_reason_text})
            else:
                LOG.error(_LE("Failed to create quota "
                              "storage alarm. Reason: %s") % quota_reason_text)

    def _clear_fault(self, alarm_id, entity_instance_id=None):
        # Only clear the alarm if one is already raised
        if (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH and
                self.current_health_alarm):
            LOG.info(_LI("Clearing health alarm"))
            self.service.fm_api.clear_fault(
                fm_constants.FM_ALARM_ID_STORAGE_CEPH,
                self.service.entity_instance_id)
        elif (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE and
              entity_instance_id in self.current_quota_alarms):
            LOG.info(_LI("Clearing quota alarm with entity_instance_id %s")
                     % entity_instance_id)
            self.service.fm_api.clear_fault(
                fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE,
                entity_instance_id)

    def clear_critical_alarm(self, group_name):
        alarm_list = self.service.fm_api.get_faults_by_id(
            fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL)
        if alarm_list:
            for alarm in range(len(alarm_list)):
                group_id = alarm_list[alarm].entity_instance_id.find("group-")
                group_instance_name = (
                    "group-" +
                    alarm_list[alarm].entity_instance_id[group_id + 6])
                if group_name == group_instance_name:
                    self.service.fm_api.clear_fault(
                        fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL,
                        alarm_list[alarm].entity_instance_id)

    def clear_all_major_critical(self, group_name=None):
        # clear major alarms
        alarm_list = self.service.fm_api.get_faults_by_id(
            fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR)
        if alarm_list:
            for alarm in range(len(alarm_list)):
                if group_name is not None:
                    group_id = (
                        alarm_list[alarm].entity_instance_id.find("group-"))
                    group_instance_name = (
                        "group-" +
                        alarm_list[alarm].entity_instance_id[group_id + 6])
                    if group_name == group_instance_name:
                        self.service.fm_api.clear_fault(
                            fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR,
                            alarm_list[alarm].entity_instance_id)
                else:
                    self.service.fm_api.clear_fault(
                        fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR,
                        alarm_list[alarm].entity_instance_id)
        # clear critical alarms
        alarm_list = self.service.fm_api.get_faults_by_id(
            fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL)
        if alarm_list:
            for alarm in range(len(alarm_list)):
                if group_name is not None:
                    group_id = (
                        alarm_list[alarm].entity_instance_id.find("group-"))
                    group_instance_name = (
                        "group-" +
                        alarm_list[alarm].entity_instance_id[group_id + 6])
                    if group_name == group_instance_name:
                        self.service.fm_api.clear_fault(
                            fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL,
                            alarm_list[alarm].entity_instance_id)
                else:
                    self.service.fm_api.clear_fault(
                        fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL,
                        alarm_list[alarm].entity_instance_id)

    def _get_current_alarms(self):
        """Retrieve the currently raised alarms."""
        self.current_health_alarm = self.service.fm_api.get_fault(
            fm_constants.FM_ALARM_ID_STORAGE_CEPH,
            self.service.entity_instance_id)
        quota_faults = self.service.fm_api.get_faults_by_id(
            fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE)
        if quota_faults:
            self.current_quota_alarms = [f.entity_instance_id
                                         for f in quota_faults]
        else:
            self.current_quota_alarms = []
@ -1,175 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# https://chrigl.de/posts/2014/08/27/oslo-messaging-example.html
# http://docs.openstack.org/developer/oslo.messaging/server.html

import sys

# noinspection PyUnresolvedReferences
import eventlet
# noinspection PyUnresolvedReferences
import oslo_messaging as messaging
# noinspection PyUnresolvedReferences
from fm_api import fm_api
# noinspection PyUnresolvedReferences
from oslo_config import cfg
# noinspection PyUnresolvedReferences
from oslo_log import log as logging
# noinspection PyUnresolvedReferences
from oslo_service import service
# noinspection PyUnresolvedReferences
from oslo_service.periodic_task import PeriodicTasks
# noinspection PyUnresolvedReferences
from oslo_service import loopingcall

# noinspection PyUnresolvedReferences
from cephclient import wrapper

from monitor import Monitor
import exception
import constants

from i18n import _LI, _LW
from retrying import retry

eventlet.monkey_patch(all=True)

CONF = cfg.CONF
CONF.register_opts([
    cfg.StrOpt('sysinv_api_bind_ip',
               default='0.0.0.0',
               help='IP for the Ceph Manager server to bind to')])
CONF.logging_default_format_string = (
    '%(asctime)s.%(msecs)03d %(process)d '
    '%(levelname)s %(name)s [-] %(message)s')
logging.register_options(CONF)
logging.setup(CONF, __name__)
LOG = logging.getLogger(__name__)
CONF.rpc_backend = 'rabbit'


class RpcEndpoint(PeriodicTasks):

    def __init__(self, service=None):
        self.service = service

    def get_primary_tier_size(self, _):
        """Get the ceph size for the primary tier.

        returns: an int for the size (in GB) of the tier
        """

        tiers_size = self.service.monitor.tiers_size
        primary_tier_size = tiers_size.get(
            self.service.monitor.primary_tier_name, 0)
        LOG.debug(_LI("Ceph cluster primary tier size: %s GB") %
                  str(primary_tier_size))
        return primary_tier_size

    def get_tiers_size(self, _):
        """Get the ceph cluster tier sizes.

        returns: a dict of sizes (in GB) by tier name
        """

        tiers_size = self.service.monitor.tiers_size
        LOG.debug(_LI("Ceph cluster tiers (size in GB): %s") %
                  str(tiers_size))
        return tiers_size

    def is_cluster_up(self, _):
        """Report if the last health check was successful.

        This is an independent view of the cluster accessibility that can be
        used by the sysinv conductor to gate ceph API calls, which would
        otherwise time out and potentially block other operations.

        This view is only updated at the rate the monitor checks for a
        cluster uuid or a health check (CEPH_HEALTH_CHECK_INTERVAL).

        returns: True if the last health check was successful, else False
        """
        return self.service.monitor.cluster_is_up


class SysinvConductorUpgradeApi(object):
    def __init__(self):
        self.sysinv_conductor = None
        super(SysinvConductorUpgradeApi, self).__init__()

    def get_software_upgrade_status(self):
        LOG.info(_LI("Getting software upgrade status from sysinv"))
        cctxt = self.sysinv_conductor.prepare(timeout=2)
        upgrade = cctxt.call({}, 'get_software_upgrade_status')
        LOG.info(_LI("Software upgrade status: %s") % str(upgrade))
        return upgrade

    @retry(wait_fixed=1000,
           retry_on_exception=lambda e:
               LOG.warn(_LW(
                   "Getting software upgrade status failed "
                   "with: %s. Retrying... ") % str(e)) or True)
    def retry_get_software_upgrade_status(self):
        return self.get_software_upgrade_status()


class Service(SysinvConductorUpgradeApi, service.Service):

    def __init__(self, conf):
        super(Service, self).__init__()
        self.conf = conf
        self.rpc_server = None
        self.sysinv_conductor = None
        self.ceph_api = None
        self.entity_instance_id = ''
        self.fm_api = fm_api.FaultAPIs()
        self.monitor = Monitor(self)
        self.config = None
        self.config_desired = None
        self.config_applied = None

    def start(self):
        super(Service, self).start()
        transport = messaging.get_transport(self.conf)
        self.sysinv_conductor = messaging.RPCClient(
            transport,
            messaging.Target(
                topic=constants.SYSINV_CONDUCTOR_TOPIC))

        self.ceph_api = wrapper.CephWrapper(
            endpoint='http://localhost:5001/api/v0.1/')

        # Get initial config from sysinv and send it to
        # services that need it before starting them
        self.rpc_server = messaging.get_rpc_server(
            transport,
            messaging.Target(topic=constants.CEPH_MANAGER_TOPIC,
                             server=self.conf.sysinv_api_bind_ip),
            [RpcEndpoint(self)],
            executor='eventlet')
        self.rpc_server.start()
        eventlet.spawn_n(self.monitor.run)

    def stop(self):
        try:
            self.rpc_server.stop()
            self.rpc_server.wait()
        except Exception:
            pass
        super(Service, self).stop()


def run_service():
    CONF(sys.argv[1:])
    logging.setup(CONF, "ceph-manager")
    launcher = service.launch(CONF, Service(CONF), workers=1)
    launcher.wait()


if __name__ == "__main__":
    run_service()
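Note: a hedged sketch of the client side of the RpcEndpoint above, as a peer service such as the sysinv conductor might call it; the topic string is assumed to be the value of constants.CEPH_MANAGER_TOPIC:

# Illustrative only; assumes the same oslo.messaging transport configuration.
import oslo_messaging as messaging
from oslo_config import cfg

transport = messaging.get_transport(cfg.CONF)
client = messaging.RPCClient(
    transport, messaging.Target(topic='ceph-manager'))  # assumed topic value
tiers = client.call({}, 'get_tiers_size')  # {} is the (unused) request context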
@ -1,19 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2013-2014, 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


import setuptools

setuptools.setup(
    name='ceph_manager',
    version='1.0.0',
    description='CEPH manager',
    license='Apache-2.0',
    packages=['ceph_manager'],
    entry_points={
    }
)
@ -1,10 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

mock
flake8
eventlet
pytest
oslo.log
oslo.i18n
@ -1,29 +0,0 @@
# adapted from glance tox.ini

[tox]
minversion = 1.6
envlist = py27,pep8
skipsdist = True
# tox does not work if the path to the workdir is too long, so move it to /tmp
toxworkdir = /tmp/{env:USER}_ceph_manager_tox

[testenv]
setenv = VIRTUAL_ENV={envdir}
usedevelop = True
install_command = pip install --no-use-wheel -U --force-reinstall {opts} {packages}
deps = -r{toxinidir}/test-requirements.txt
commands = py.test {posargs}
whitelist_externals = bash
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY

[testenv:py27]
basepython = python2.7
setenv =
    PYTHONPATH={toxinidir}/../../../../sysinv/recipes-common/sysinv/sysinv:{toxinidir}/../../../../config/recipes-common/tsconfig/tsconfig

[testenv:pep8]
commands =
    flake8 {posargs}

[flake8]
exclude = .venv,.git,.tox,dist,doc,etc,*glance/locale*,*lib/python*,*egg,build
@ -1,11 +0,0 @@
/var/log/ceph-manager.log {
    nodateext
    size 10M
    start 1
    rotate 10
    missingok
    notifempty
    compress
    delaycompress
    copytruncate
}
@ -1,17 +0,0 @@
[Unit]
Description=Handle Ceph API calls and provide status updates via alarms
After=ceph.target

[Service]
Type=forking
Restart=no
KillMode=process
RemainAfterExit=yes
ExecStart=/etc/rc.d/init.d/ceph-manager start
ExecStop=/etc/rc.d/init.d/ceph-manager stop
ExecReload=/etc/rc.d/init.d/ceph-manager reload
PIDFile=/var/run/ceph/ceph-manager.pid

[Install]
WantedBy=multi-user.target

@ -1,17 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


import sys

try:
    from ceph_manager.server import run_service
except EnvironmentError as e:
    print >> sys.stderr, "Error importing ceph_manager: ", str(e)
    sys.exit(1)

run_service()
@ -1,103 +0,0 @@
#!/bin/sh
#
# Copyright (c) 2013-2014, 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


### BEGIN INIT INFO
# Provides:          ceph-manager
# Required-Start:    $ceph
# Required-Stop:     $ceph
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Daemon for polling ceph status
# Description:       Daemon for polling ceph status
### END INIT INFO

DESC="ceph-manager"
DAEMON="/usr/bin/ceph-manager"
RUNDIR="/var/run/ceph"
PIDFILE=$RUNDIR/$DESC.pid

CONFIGFILE="/etc/sysinv/sysinv.conf"
LOGFILE="/var/log/ceph-manager.log"

start()
{
    if [ -e $PIDFILE ]; then
        PIDDIR=/proc/$(cat $PIDFILE)
        if [ -d ${PIDDIR} ]; then
            echo "$DESC already running."
            exit 0
        else
            echo "Removing stale PID file $PIDFILE"
            rm -f $PIDFILE
        fi
    fi

    echo -n "Starting $DESC..."
    mkdir -p $RUNDIR
    start-stop-daemon --start --quiet \
        --pidfile ${PIDFILE} --exec ${DAEMON} \
        --make-pidfile --background \
        -- --log-file=$LOGFILE --config-file=$CONFIGFILE

    if [ $? -eq 0 ]; then
        echo "done."
    else
        echo "failed."
        exit 1
    fi
}

stop()
{
    echo -n "Stopping $DESC..."
    start-stop-daemon --stop --quiet --pidfile $PIDFILE --retry 60
    if [ $? -eq 0 ]; then
        echo "done."
    else
        echo "failed."
    fi
    rm -f $PIDFILE
}

status()
{
    pid=`cat $PIDFILE 2>/dev/null`
    if [ -n "$pid" ]; then
        if ps -p $pid &> /dev/null ; then
            echo "$DESC is running"
            exit 0
        else
            echo "$DESC is not running but has pid file"
            exit 1
        fi
    fi
    echo "$DESC is not running"
    exit 3
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart|force-reload|reload)
        stop
        start
        ;;
    status)
        status
        ;;
    *)
        echo "Usage: $0 {start|stop|force-reload|restart|reload|status}"
        exit 1
        ;;
esac

exit 0
@ -1,5 +0,0 @@
SRC_DIR="$CGCS_BASE/git/ceph"
TIS_BASE_SRCREV=3f07f7ff1a5c7bfa8d0de12c966594d5fb7cf4ec
TIS_PATCH_VER=GITREVCOUNT
BUILD_IS_BIG=40
BUILD_IS_SLOW=26
@ -1 +0,0 @@
../../../git/ceph/ceph.spec
@ -1,326 +0,0 @@
#!/usr/bin/python
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import ast
import os
import os.path
import re
import subprocess
import sys


#########
# Utils #
#########

def command(arguments, **kwargs):
    """Execute a command and capture stdout, stderr & return code."""
    process = subprocess.Popen(
        arguments,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        **kwargs)
    out, err = process.communicate()
    return out, err, process.returncode


def get_input(arg, valid_keys):
    """Convert the input to a dict and perform basic validation."""
    json_string = arg.replace("\\n", "\n")
    try:
        input_dict = ast.literal_eval(json_string)
        if not all(k in input_dict for k in valid_keys):
            return None
    except Exception:
        return None

    return input_dict


def get_partition_uuid(dev):
    output, _, _ = command(['blkid', dev])
    try:
        return re.search('PARTUUID=\"(.+?)\"', output).group(1)
    except AttributeError:
        return None


def device_path_to_device_node(device_path):
    try:
        output, _, _ = command(["udevadm", "settle", "-E", device_path])
        out, err, retcode = command(["readlink", "-f", device_path])
        out = out.rstrip()
    except Exception:
        return None

    return out


###########################################
# Manage Journal Disk Partitioning Scheme #
###########################################

DISK_BY_PARTUUID = "/dev/disk/by-partuuid/"
JOURNAL_UUID = '45b0969e-9b03-4f30-b4c6-b4b80ceff106'  # Type of a journal partition


def is_partitioning_correct(disk_path, partition_sizes):
    """Validate the existence and size of journal partitions."""

    # Obtain the device node from the device path.
    disk_node = device_path_to_device_node(disk_path)

    # Check that the partition table format is GPT
    output, _, _ = command(["udevadm", "settle", "-E", disk_node])
    output, _, _ = command(["parted", "-s", disk_node, "print"])
    if not re.search('Partition Table: gpt', output):
        print "Format of disk node %s is not GPT, zapping disk" % disk_node
        return False

    # Check each partition size
    partition_index = 1
    for size in partition_sizes:
        # Check that each partition size matches the one in input
        partition_node = disk_node + str(partition_index)
        output, _, _ = command(["udevadm", "settle", "-E", partition_node])
        cmd = ["parted", "-s", partition_node, "unit", "MiB", "print"]
        output, _, _ = command(cmd)

        regex = ("^Disk " + str(partition_node) + ":\\s*" +
                 str(size) + "[\\.0]*MiB")
        if not re.search(regex, output, re.MULTILINE):
            print ("Journal partition %(node)s size is not %(size)s, "
                   "zapping disk" % {"node": partition_node, "size": size})
            return False

        partition_index += 1

    output, _, _ = command(["udevadm", "settle", "-t", "10"])
    return True


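Note: the size check above anchors a regex on parted's header line; a self-contained sketch with an invented sample output:

# Illustrative: parted prints a line like this for a 1024 MiB partition.
import re

sample_output = "Disk /dev/sdb1: 1024MiB"
regex = "^Disk " + "/dev/sdb1" + ":\\s*" + "1024" + "[\\.0]*MiB"
assert re.search(regex, sample_output, re.MULTILINE) is not None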
def create_partitions(disk_path, partition_sizes):
    """Recreate partitions."""

    # Obtain the device node from the device path.
    disk_node = device_path_to_device_node(disk_path)

    # Issue: After creating a new partition table on a device, udev does not
    # always remove old symlinks (i.e. to previous partitions on that device).
    # Also, even if links are erased before zapping the disk, some of them will
    # be recreated even though there is no partition to back them!
    # Therefore, we have to remove the links AFTER we erase the partition table.
    # Issue: the DISK_BY_PARTUUID directory is not present at all if there are
    # no GPT partitions on the storage node, so there is nothing to remove in
    # this case.
    links = []
    if os.path.isdir(DISK_BY_PARTUUID):
        links = [os.path.join(DISK_BY_PARTUUID, l)
                 for l in os.listdir(DISK_BY_PARTUUID)
                 if os.path.islink(os.path.join(DISK_BY_PARTUUID, l))]

    # Erase all partitions on the current node by creating a new GPT table
    _, err, ret = command(["parted", "-s", disk_node, "mktable", "gpt"])
    if ret:
        print ("Error erasing partition table of %(node)s\n"
               "Return code: %(ret)s reason: %(reason)s" %
               {"node": disk_node, "ret": ret, "reason": err})
        exit(1)

    # Erase old symlinks
    for l in links:
        if disk_node in os.path.realpath(l):
            os.remove(l)

    # Create partitions in order
    used_space_mib = 1  # leave 1 MB at the beginning of the disk
    num = 1
    for size in partition_sizes:
        cmd = ['parted', '-s', disk_node, 'unit', 'mib',
               'mkpart', 'primary',
               str(used_space_mib), str(used_space_mib + size)]
        _, err, ret = command(cmd)
        parms = {"disk_node": disk_node,
                 "start": used_space_mib,
                 "end": used_space_mib + size,
                 "reason": err}
        print ("Created partition from start=%(start)s MiB to end=%(end)s MiB"
               " on %(disk_node)s" % parms)
        if ret:
            print ("Failed to create partition with "
                   "start=%(start)s, end=%(end)s "
                   "on %(disk_node)s reason: %(reason)s" % parms)
            exit(1)
        # Set the partition type to ceph journal.
        # Noncritical operation; it makes 'ceph-disk list' output correct info.
        cmd = ['sgdisk',
               '--change-name={num}:ceph journal'.format(num=num),
               '--typecode={num}:{uuid}'.format(
                   num=num,
                   uuid=JOURNAL_UUID,
               ),
               disk_node]
        _, err, ret = command(cmd)
        if ret:
            print ("WARNING: Failed to set partition name and typecode")
        used_space_mib += size
        num += 1

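Note: a worked example of the partition layout produced by the loop above, for two hypothetical 1024 MiB journals:

# Illustrative: partition 1 spans 1..1025 MiB, partition 2 spans 1025..2049 MiB,
# with 1 MiB reserved at the start of the disk.
used_space_mib = 1
for size in [1024, 1024]:
    print "%s MiB .. %s MiB" % (used_space_mib, used_space_mib + size)
    used_space_mib += size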
###########################
# Manage Journal Location #
###########################

OSD_PATH = "/var/lib/ceph/osd/"


def mount_data_partition(data_path, osdid):
    """Mount an OSD data partition and return the mounted path."""

    # Obtain the device node from the device path.
    data_node = device_path_to_device_node(data_path)

    mount_path = OSD_PATH + "ceph-" + str(osdid)
    output, _, _ = command(['mount'])
    regex = "^" + data_node + ".*" + mount_path
    if not re.search(regex, output, re.MULTILINE):
        cmd = ['mount', '-t', 'xfs', data_node, mount_path]
        _, _, ret = command(cmd)
        params = {"node": data_node, "path": mount_path}
        if ret:
            print "Failed to mount %(node)s to %(path)s, aborting" % params
            exit(1)
        else:
            print "Mounted %(node)s to %(path)s" % params
    return mount_path


def is_location_correct(path, journal_path, osdid):
    """Check if the journal location points to the correct device."""

    # Obtain the device node from the device path.
    journal_node = device_path_to_device_node(journal_path)

    cur_node = os.path.realpath(path + "/journal")
    if cur_node == journal_node:
        return True
    else:
        return False


def fix_location(mount_point, journal_path, osdid):
    """Move the journal to the new partition."""

    # Obtain the device node from the device path.
    journal_node = device_path_to_device_node(journal_path)

    # Fix the symlink
    path = mount_point + "/journal"  # 'journal' symlink path used by ceph-osd
    journal_uuid = get_partition_uuid(journal_node)
    new_target = DISK_BY_PARTUUID + journal_uuid
    params = {"path": path, "target": new_target}
    try:
        if os.path.lexists(path):
            os.unlink(path)  # delete the old symlink
        os.symlink(new_target, path)
        print "Symlink created: %(path)s -> %(target)s" % params
    except Exception:
        print "Failed to create symlink: %(path)s -> %(target)s" % params
        exit(1)
    # Fix journal_uuid
    path = mount_point + "/journal_uuid"
    try:
        with open(path, 'w') as f:
            f.write(journal_uuid)
    except Exception:
        # The operation is noncritical, it only makes 'ceph-disk list'
        # display complete output. We log and continue.
        params = {"path": path, "uuid": journal_uuid}
        print "WARNING: Failed to set uuid of %(path)s to %(uuid)s" % params

    # Clean the journal partition.
    # Even after erasing the partition table, if another journal was present
    # here it is going to be reused. Journals are always bigger than 100MB.
    command(['dd', 'if=/dev/zero', 'of=%s' % journal_node,
             'bs=1M', 'count=100'])

    # Format the journal
    cmd = ['/usr/bin/ceph-osd', '-i', str(osdid),
           '--pid-file', '/var/run/ceph/osd.%s.pid' % osdid,
           '-c', '/etc/ceph/ceph.conf',
           '--cluster', 'ceph',
           '--mkjournal']
    out, err, ret = command(cmd)
    params = {"journal_node": journal_node,
              "osdid": osdid,
              "ret": ret,
              "reason": err}
    if not ret:
        print ("Prepared new journal partition: %(journal_node)s "
               "for osd id: %(osdid)s" % params)
    else:
        print ("Error initializing journal node: "
               "%(journal_node)s for osd id: %(osdid)s "
               "ceph-osd return code: %(ret)s reason: %(reason)s" % params)


########
# Main #
########

def main(argv):
    # parse and validate arguments
    err = False
    partitions = None
    location = None
    if len(argv) != 2:
        err = True
    elif argv[0] == "partitions":
        valid_keys = ['disk_path', 'journals']
        partitions = get_input(argv[1], valid_keys)
        if not partitions:
            err = True
        elif not isinstance(partitions['journals'], list):
            err = True
    elif argv[0] == "location":
        valid_keys = ['data_path', 'journal_path', 'osdid']
        location = get_input(argv[1], valid_keys)
        if not location:
            err = True
        elif not isinstance(location['osdid'], int):
            err = True
    else:
        err = True
    if err:
        print "Command intended for internal use only"
        exit(-1)

    if partitions:
        # Recreate partitions only if the existing ones don't match input
        if not is_partitioning_correct(partitions['disk_path'],
                                       partitions['journals']):
            create_partitions(partitions['disk_path'], partitions['journals'])
        else:
            print ("Partition table for %s is correct, "
                   "no need to repartition" %
                   device_path_to_device_node(partitions['disk_path']))
    elif location:
        # We need to have the data partition mounted & we can leave it mounted
        mount_point = mount_data_partition(location['data_path'],
                                           location['osdid'])
        # Update the journal location only if the link points to another
        # partition
        if not is_location_correct(mount_point,
                                   location['journal_path'],
                                   location['osdid']):
            print ("Fixing journal location for "
                   "OSD id: %(id)s" % {"node": location['data_path'],
                                       "id": location['osdid']})
            fix_location(mount_point,
                         location['journal_path'],
                         location['osdid'])
        else:
            print ("Journal location for %s is correct, "
                   "no need to change it" % location['data_path'])


main(sys.argv[1:])
@ -1,3 +0,0 @@
cgcs/middleware/ceph/recipes-common/ceph-manager|ceph-manager
cgcs/openstack/recipes-base|openstack
cgcs/recipes-extended/ceph|ceph
@ -396,11 +396,11 @@ done < %{SOURCE1}
export PBR_VERSION=%{version}
%{__python2} setup.py install -O1 --skip-build --root %{buildroot}

# WRS: Install sql migration cfg and sql files that were not installed by setup.py
# Install sql migration cfg and sql files that were not installed by setup.py
install -m 644 ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg %{buildroot}%{python_sitelib}/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg
install -m 644 ceilometer/storage/sqlalchemy/migrate_repo/versions/*.sql %{buildroot}%{python_sitelib}/ceilometer/storage/sqlalchemy/migrate_repo/versions/.

# WRS Mitaka. Install non python files that were not installed by setup.py
# Install non python files that were not installed by setup.py
install -m 755 -d %{buildroot}%{python_sitelib}/ceilometer/hardware/pollsters/data
install -m 644 ceilometer/hardware/pollsters/data/snmp.yaml %{buildroot}%{python_sitelib}/ceilometer/hardware/pollsters/data/snmp.yaml

@ -445,10 +445,8 @@ install -p -D -m 640 ceilometer/pipeline/data/event_definitions.yaml %{buildroot
install -p -D -m 640 etc/ceilometer/api_paste.ini %{buildroot}%{_sysconfdir}/ceilometer/api_paste.ini
install -p -D -m 640 etc/ceilometer/rootwrap.conf %{buildroot}%{_sysconfdir}/ceilometer/rootwrap.conf
install -p -D -m 640 etc/ceilometer/rootwrap.d/ipmi.filters %{buildroot}/%{_sysconfdir}/ceilometer/rootwrap.d/ipmi.filters
install -p -D -m 640 ceilometer/dispatcher/data/gnocchi_resources.yaml %{buildroot}%{_sysconfdir}/ceilometer/gnocchi_resources.yaml
install -p -D -m 640 ceilometer/publisher/data/gnocchi_resources.yaml %{buildroot}%{_sysconfdir}/ceilometer/gnocchi_resources.yaml
install -p -D -m 640 ceilometer/data/meters.d/meters.yaml %{buildroot}%{_sysconfdir}/ceilometer/meters.d/meters.yaml
# WRS
install -p -D -m 640 etc/ceilometer/controller.yaml %{buildroot}%{_sysconfdir}/ceilometer/controller.yaml
install -p -D -m 640 ceilometer/api/ceilometer-api.py %{buildroot}%{_datadir}/ceilometer/ceilometer-api.py


@ -613,8 +611,6 @@ exit 0
%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/api_paste.ini
%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/gnocchi_resources.yaml

%{_sysconfdir}/ceilometer/controller.yaml

%dir %attr(0750, ceilometer, root) %{_localstatedir}/log/ceilometer

%{_bindir}/ceilometer-db-legacy-clean

@ -1 +1 @@
TIS_PATCH_VER=3
TIS_PATCH_VER=4
@ -0,0 +1,24 @@
From 4aafd598ace7fc1bf4c5aaf3591b8880e7642d69 Mon Sep 17 00:00:00 2001
From: Tyler Smith <tyler.smith@windriver.com>
Date: Fri, 29 Jun 2018 20:07:09 -0500
Subject: [PATCH 1/1] [PATCH] meta patch for distributed keystone fix

---
 SPECS/python-django-openstack-auth.spec | 1 +
 1 file changed, 1 insertion(+)

diff --git a/SPECS/python-django-openstack-auth.spec b/SPECS/python-django-openstack-auth.spec
index 9e5aacb..73b87b5 100644
--- a/SPECS/python-django-openstack-auth.spec
+++ b/SPECS/python-django-openstack-auth.spec
@@ -20,6 +20,7 @@ Patch0002: 0002-disable-token-validation-per-auth-request.patch
 Patch0003: 0003-cache-authorized-tenants-in-cookie-to-improve-performance.patch
 Patch0004: 0004-US112170-Distributed-Keystone.patch
 Patch0005: fix_for_session_timeout.patch
+Patch0006: fix_for_dc_region_switching.patch

 BuildArch: noarch

--
1.8.3.1

@ -5,3 +5,4 @@
0005-meta-cache-authorized-tenants-in-cookie-to-improve-performance.patch
0006-meta-Distributed-Keystone.patch
spec-include-fix_for_session_timeout.patch
0007-meta-patch-for-distributed-keystone-fix.patch
@ -0,0 +1,100 @@
From d7f75aa87d7b476509b21f9c29162763a73a65af Mon Sep 17 00:00:00 2001
From: rpm-build <rpm-build>
Date: Fri, 29 Jun 2018 19:51:32 -0500
Subject: [PATCH 1/1] Horizon Distributed Cloud subcloud switching
 is broken

-Fixed switching from RegionOne to SystemController
-Fixed tenant list not being displayed on login
---
 openstack_auth/backend.py |  2 +-
 openstack_auth/forms.py   | 14 +++++++++++++-
 openstack_auth/views.py   | 21 ++++++++++++---------
 3 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/openstack_auth/backend.py b/openstack_auth/backend.py
index cd15ca8..4fa7129 100644
--- a/openstack_auth/backend.py
+++ b/openstack_auth/backend.py
@@ -162,7 +162,7 @@ class KeystoneBackend(object):
         # We want to try to use the same region we just logged into
         # which may or may not be the default depending upon the order
         # keystone uses
-        region_name = None
+        region_name = kwargs.get('force_region', None)
         id_endpoints = scoped_auth_ref.service_catalog.\
             get_endpoints(service_type='identity')
         for id_endpoint in [cat for cat in id_endpoints['identity']]:
diff --git a/openstack_auth/forms.py b/openstack_auth/forms.py
index 4834ab2..49b8d8b 100644
--- a/openstack_auth/forms.py
+++ b/openstack_auth/forms.py
@@ -138,11 +138,24 @@ class Login(django_auth_forms.AuthenticationForm):
         if lockedout:
             raise forms.ValidationError("user currently locked out.")

+        # when logging in in DC mode we will force the region to
+        # be system controller since authenticate can't map our
+        # hostname to an endpoint/regionname. Changing the hostname
+        # in settings to ip will work but will break region switching
+        # from RegionOne to SystemController since SystemController
+        # region maps back to RegionOne (same keystone)
+        force_region = None
+        if getattr(settings, 'DC_MODE', False) and \
+                region == getattr(settings, 'OPENSTACK_KEYSTONE_URL', None):
+            force_region = utils.DC_SYSTEMCONTROLLER_REGION
+
         self.user_cache = authenticate(request=self.request,
                                        username=username,
                                        password=password,
                                        user_domain_name=domain,
-                                       auth_url=region)
+                                       auth_url=region,
+                                       force_region=force_region)
+
         msg = 'Login successful for user "%(username)s", remote address '\
             '%(remote_ip)s.' % {
                 'username': username,
diff --git a/openstack_auth/views.py b/openstack_auth/views.py
index a680abf..0b1351d 100644
--- a/openstack_auth/views.py
+++ b/openstack_auth/views.py
@@ -293,15 +293,17 @@ def switch_region(request, region_name,
     endpoint_dict = utils.get_internal_identity_endpoints(
         request.user.service_catalog, region_filter=region_name)

-    try:
+    # If we were on a subcloud, then the SystemController Identity
+    # endpoint will not be functional, therefore retrieve the
+    # RegionOne endpoint from the session (cached at login)
+    force_region = None
+    if region_name == utils.DC_SYSTEMCONTROLLER_REGION:
+        force_region = utils.DC_SYSTEMCONTROLLER_REGION
+        region_auth_url = request.session.get(
+            'SystemController_endpoint', None)
+    else:
         region_auth_url = endpoint_dict[region_name]
-    except KeyError as e:
-        # If we were on a subcloud, then the SystemController Identity
-        # endpoint will not be available, therefore retrieve it from
-        # the session (cached at login)
-        if region_name == utils.DC_SYSTEMCONTROLLER_REGION:
-            region_auth_url = request.session.get(
-                'SystemController_endpoint', None)
+

     if not region_auth_url:
         msg = _('Cannot switch to subcloud %s, no Identity available '
@@ -324,7 +326,8 @@ def switch_region(request, region_name,
     try:
         request.user = auth.authenticate(
             request=request, auth_url=unscoped_auth.auth_url,
-            token=unscoped_auth_ref.auth_token)
+            token=unscoped_auth_ref.auth_token,
+            force_region=force_region)
     except exceptions.KeystoneAuthException as exc:
         msg = 'Switching to Subcloud failed: %s' % six.text_type(exc)
         res = django_http.HttpResponseRedirect(settings.LOGIN_URL)
--
1.8.3.1

@ -4,17 +4,16 @@
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
backup_dir="/opt/backups"
|
||||
tmp_dir="${backup_dir}/image_temp"
|
||||
|
||||
function usage {
|
||||
cat <<"EOF"
|
||||
cat <<"EOF"
|
||||
Helper tool for backing up Glance images
|
||||
Usage:
|
||||
image-backup export <uuid> - export the image with <uuid> into backup file /opt/backups/image_<uuid>.tgz
|
||||
image-backup import image_<uuid>.tgz - import the image from the backup source file at /opt/backups/image_<uuid>.tgz
|
||||
into the corresponding image.
|
||||
image-backup export <uuid> - export the image with <uuid> into backup file /opt/backups/image_<uuid>.tgz
|
||||
image-backup import image_<uuid>.tgz - import the image from the backup source file at /opt/backups/image_<uuid>.tgz
|
||||
into the corresponding image.
|
||||
|
||||
Temporary files are stored in /opt/backups/image_temp
|
||||
|
||||
@ -23,22 +22,22 @@ EOF
|
||||
}
|
||||
|
||||
function create_tmp {
|
||||
if [ ! -d ${backup_dir} ]; then
|
||||
echo "Error: backup directory ${backup_dir} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
# Create temporary directory
|
||||
if [ ! -d ${tmp_dir} ]; then
|
||||
mkdir ${tmp_dir}
|
||||
fi
|
||||
if [ ! -d ${backup_dir} ]; then
|
||||
echo "Error: backup directory ${backup_dir} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
# Create temporary directory
|
||||
if [ ! -d ${tmp_dir} ]; then
|
||||
mkdir ${tmp_dir}
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
function remove_tmp {
|
||||
# Remove temporary files and directory if not empty
|
||||
local uuid=$1
|
||||
rm -f ${tmp_dir}/${uuid}*
|
||||
rmdir --ignore-fail-on-non-empty ${tmp_dir} &>/dev/null
|
||||
# Remove temporary files and directory if not empty
|
||||
local uuid=$1
|
||||
rm -f ${tmp_dir}/${uuid}*
|
||||
rmdir --ignore-fail-on-non-empty ${tmp_dir} &>/dev/null
|
||||
}
|
||||
|
||||
function export_file_from_rbd_image {
|
||||
@ -56,9 +55,9 @@ function export_image {
|
||||
# Check if the corresponding image is present in the RBD pool
|
||||
rbd -p images ls | grep -q -e "^${uuid}$"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Corresponding file for image with id: ${uuid} was not found in the RBD images pool"
|
||||
remove_tmp; exit 1
|
||||
fi
|
||||
echo "Error: Corresponding file for image with id: ${uuid} was not found in the RBD images pool"
|
||||
remove_tmp; exit 1
|
||||
fi
|
||||
|
||||
# Export original image
|
||||
export_file_from_rbd_image ${uuid}
|
||||
@ -66,21 +65,21 @@ function export_image {
|
||||
# Export raw cache if present
|
||||
rbd -p images ls | grep -q ${uuid}_raw
|
||||
if [ $? -eq 0 ]; then
|
||||
export_file_from_rbd_image ${uuid}_raw
|
||||
export_file_from_rbd_image ${uuid}_raw
|
||||
raw="${uuid}_raw"
|
||||
fi
|
||||
|
||||
|
||||
echo -n "Creating backup archive..."
|
||||
archive="${backup_dir}/image_${uuid}.tgz"
|
||||
tar czf ${archive} -C ${tmp_dir} ${uuid} ${raw}
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to create archive ${archive}"
|
||||
remove_tmp; exit 1
|
||||
else
|
||||
echo "Error: Failed to create archive ${archive}"
|
||||
remove_tmp; exit 1
|
||||
else
|
||||
echo "done"
|
||||
fi
|
||||
|
||||
echo "Backup archive ${archive} created"
|
||||
echo "Backup archive ${archive} created"
|
||||
}
|
||||
|
||||
function import_file_to_rbd_image {
|
||||
@ -88,17 +87,17 @@ function import_file_to_rbd_image {
|
||||
local snap="images/${file}@snap"
|
||||
rbd import --image-format 2 ${tmp_dir}/${file} images/${file}
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to import image ${file} into Ceph images pool, please check status of storage cluster"
|
||||
echo "Error: Failed to import image ${file} into Ceph images pool, please check status of storage cluster"
|
||||
remove_tmp; exit 1
|
||||
fi
|
||||
rbd snap create ${snap} 1>/dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to create snapshot ${snap}, please check status of storage cluster"
|
||||
echo "Error: Failed to create snapshot ${snap}, please check status of storage cluster"
|
||||
remove_tmp; exit 1
|
||||
fi
|
||||
rbd snap protect ${snap} 1>/dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to protect snapshot ${snap}, please check status of storage cluster"
|
||||
echo "Error: Failed to protect snapshot ${snap}, please check status of storage cluster"
|
||||
remove_tmp; exit 1
|
||||
fi
|
||||
}
@ -108,15 +107,15 @@ function import_image {

    # Storage cluster must be healthy before starting the import
    if [ ! "$(ceph health)" = "HEALTH_OK" ]; then
        echo "Error: The storage cluster health must be HEALTH_OK before proceeding"
        remove_tmp; exit 1
    fi

    # Check if the corresponding image is already present in the RBD pool
    rbd -p images ls | grep -q -e "^${uuid}$"
    if [ $? -eq 0 ]; then
        echo "Error: Image with id: ${uuid} is already imported"
        remove_tmp; exit 1
    fi

    # Import original image
@ -124,17 +123,16 @@ function import_image {

    # Import raw cache
    if [ -f "${tmp_dir}/${uuid}_raw" ]; then
        import_file_to_rbd_image ${uuid}_raw
    fi
}

if [ $EUID -ne 0 ]; then
    echo "This script must be executed as root"
    exit 1
fi

if [ $# -ne 2 ]; then
    usage
    exit 0
fi

@ -144,69 +142,68 @@ source /etc/nova/openrc
# Check that glance is configured with the ceph RBD backend
cat /etc/glance/glance-api.conf | grep -q -e "^stores.*=.*rbd"
if [ $? -ne 0 ]; then
    echo "Error: Glance is not configured to use the ceph backend."
    echo "This command should be used only on setups with configured Ceph storage."
    exit 1
fi

if [ "$1" = "export" ]; then
    # Check that the glance image is present in glance
    glance image-list | tail -n +3 | awk '{print $2}' | grep -q $2
    if [ $? -ne 0 ]; then
        echo "Error: Glance image with id: $2 not found. Please try with an existing image id."
        remove_tmp; exit 1
    fi

    # Only allow backup of images that use rbd as backend.
    glance image-show $2 | grep 'direct_url' | awk '{print $4}' | grep -q '^rbd://'
    if [ $? -ne 0 ]; then
        echo "Image with id: $2 is not stored in Ceph RBD. Backup using image-backup tool is not needed."
        echo "Please consult the Software Management Manual for more details."
        remove_tmp; exit 1
    fi

    create_tmp
    export_image $2
    remove_tmp

elif [ "$1" = "import" ]; then
    # Check that the input file format is correct
    if [[ ! $2 =~ ^image_.*\.tgz$ ]]; then
        echo "Error: Source file name must conform to image_<uuid>.tgz format"
        exit 1
    fi

    # Check that the source file exists
    if [ ! -f ${backup_dir}/$2 ]; then
        echo "Error: File $2 does not exist in ${backup_dir}"
        exit 1
    fi

    # Get the glance uuid from the filename
    uuid=$(echo $2 | sed "s/^image_\(.*\)\.tgz/\1/g")

    # Check that glance has this image in the database
    glance image-list | grep -q $uuid
    if [ $? -ne 0 ]; then
        echo "Error: Glance image with id: ${uuid} not found. Please try with an existing image id."
        exit 1
    fi

    create_tmp

    # Extract the files that need to be imported into the temp directory
    echo -n "Extracting files..."
    tar xfz ${backup_dir}/$2 -C ${tmp_dir} 1>/dev/null
    if [ $? -ne 0 ]; then
        echo "Error: Failed to extract archive ${backup_dir}/$2 into ${tmp_dir}."
        remove_tmp; exit 1
    fi
    echo "done"

    # Import the images into RBD
    import_image $uuid
    remove_tmp
else
    usage
fi
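
The import branch validates the archive name with a shell regex and derives the glance uuid with sed; a minimal Python equivalent of that pair of steps (hypothetical helper, same pattern):

import re

def uuid_from_archive_name(name):
    # image_<uuid>.tgz, as enforced by the [[ =~ ]] test above; the
    # captured group is what the sed expression extracts.
    m = re.match(r'^image_(.*)\.tgz$', name)
    if m is None:
        raise ValueError("Source file name must conform to image_<uuid>.tgz format")
    return m.group(1)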

@ -5,7 +5,11 @@ After=syslog.target network.target
[Service]
Type=simple
User=root
ExecStart=/bin/python /usr/bin/gunicorn \
    --config /usr/share/gnocchi/gnocchi-api.conf \
    --pythonpath /usr/share/gnocchi gnocchi-api \
    --log-file /var/log/gnocchi/api.log \
    --pid /var/run/gnocchi-api.pid
#Restart=on-failure

[Install]

@ -17,16 +17,16 @@ def worker_abort(worker):
    path = ("/proc/%s/fd") % os.getpid()
    contents = os.listdir(path)
    upload_dir = getattr(settings, 'FILE_UPLOAD_TEMP_DIR', '/tmp')
    pattern = os.path.join(upload_dir, '*.upload')

    for i in contents:
        f = os.path.join(path, i)
        if os.path.exists(f):
            try:
                link = os.readlink(f)
                if fnmatch.fnmatch(link, pattern):
                    worker.log.info(link)
                    os.remove(link)
            except OSError:
                pass
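
worker_abort runs when gunicorn aborts a worker (for example on timeout); it walks /proc/<pid>/fd, resolves each descriptor, and unlinks leftover upload files. The matching it relies on is plain fnmatch, e.g.:

import fnmatch

# Illustrative: only resolved links under the upload pattern are removed.
fnmatch.fnmatch('/tmp/a1b2c3.upload', '/tmp/*.upload')   # True
fnmatch.fnmatch('/tmp/a1b2c3.partial', '/tmp/*.upload')  # False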

@ -10,52 +10,55 @@ from openstack_dashboard.settings import HORIZON_CONFIG

from tsconfig.tsconfig import distributed_cloud_role

# Custom STX settings
import configss

DEBUG = False

# This setting controls whether or not compression is enabled. Disabling
# compression makes Horizon considerably slower, but makes it much easier
# to debug JS and CSS changes
# COMPRESS_ENABLED = not DEBUG

# This setting controls whether compression happens on the fly, or offline
# with `python manage.py compress`
# See https://django-compressor.readthedocs.io/en/latest/usage/#offline-compression
# for more information
# COMPRESS_OFFLINE = not DEBUG

# WEBROOT is the location relative to Webserver root
# should end with a slash.
WEBROOT = '/'
# LOGIN_URL = WEBROOT + 'auth/login/'
# LOGOUT_URL = WEBROOT + 'auth/logout/'
#
# LOGIN_REDIRECT_URL can be used as an alternative for
# HORIZON_CONFIG.user_home, if user_home is not set.
# Do not set it to '/home/', as this will cause circular redirect loop
# LOGIN_REDIRECT_URL = WEBROOT

# If horizon is running in production (DEBUG is False), set this
# with the list of host/domain names that the application can serve.
# For more information see:
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
# ALLOWED_HOSTS = ['horizon.example.com', ]

# Set SSL proxy settings:
# Pass this header from the proxy after terminating the SSL,
# and don't forget to strip it from the client's request.
# For more information see:
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# If Horizon is being served through SSL, then uncomment the following two
# settings to better secure the cookies from security exploits
# CSRF_COOKIE_SECURE = True
# SESSION_COOKIE_SECURE = True

# The absolute path to the directory where message files are collected.
# The message file must have a .json file extension. When the user logs in to
# horizon, the message files collected are processed and displayed to the user.
# MESSAGES_PATH=None

# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
@ -64,32 +67,32 @@ WEBROOT = '/'
# service API. For example, the identity service APIs have inconsistent
# use of the decimal point, so valid options would be 2.0 or 3.
# Minimum compute version to get the instance locked status is 2.9.
# OPENSTACK_API_VERSIONS = {
#     "data-processing": 1.1,
#     "identity": 3,
#     "image": 2,
#     "volume": 2,
#     "compute": 2,
# }

# Set this to True if running on a multi-domain model. When this is enabled, it
# will require the user to enter the Domain name in addition to the username
# for login.
# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False

# Set this to True if you want available domains displayed as a dropdown menu
# on the login screen. It is strongly advised NOT to enable this for public
# clouds, as advertising enabled domains to unauthenticated customers
# irresponsibly exposes private information. This should only be used for
# private clouds where the dashboard sits behind a corporate firewall.
# OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN = False

# If OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN is enabled, this option can be used to
# set the available domains to choose from. This is a list of pairs whose first
# value is the domain name and the second is the display name.
# OPENSTACK_KEYSTONE_DOMAIN_CHOICES = (
#     ('Default', 'Default'),
# )

# Overrides the default domain used when running on single-domain model
# with Keystone V3. All entities will be created in the default domain.
@ -97,45 +100,45 @@ WEBROOT = '/'
# Also, you will most likely have a value in the keystone policy file like this
#     "cloud_admin": "rule:admin_required and domain_id:<your domain id>"
# This value must be the name of the domain whose ID is specified there.
# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'

# Set this to True to enable panels that provide the ability for users to
# manage Identity Providers (IdPs) and establish a set of rules to map
# federation protocol attributes to Identity API attributes.
# This extension requires v3.0+ of the Identity API.
# OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False

# Set Console type:
# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None
# Set to None explicitly if you want to deactivate the console.
# CONSOLE_TYPE = "AUTO"

# If provided, a "Report Bug" link will be displayed in the site header
# which links to the value of this setting (ideally a URL containing
# information on how to report issues).
# HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com"

# Show backdrop element outside the modal, do not close the modal
# after clicking on backdrop.
# HORIZON_CONFIG["modal_backdrop"] = "static"

# Specify a regular expression to validate user passwords.
# HORIZON_CONFIG["password_validator"] = {
#     "regex": '.*',
#     "help_text": _("Your password does not meet the requirements."),
# }

# Disable simplified floating IP address management for deployments with
# multiple floating IP pools or complex network requirements.
# HORIZON_CONFIG["simple_ip_management"] = False

# Turn off browser autocompletion for forms including the login form and
# the database creation workflow if so desired.
# HORIZON_CONFIG["password_autocomplete"] = "off"

# Setting this to True will disable the reveal button for password fields,
# including on the login form.
# HORIZON_CONFIG["disable_password_reveal"] = False

LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))

@ -154,12 +157,12 @@ SECRET_KEY = secret_key.generate_or_read_from_file(
# We recommend you use memcached for development; otherwise after every reload
# of the django development server, you will have to log in again. To use
# memcached set CACHES to something like
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#         'LOCATION': '127.0.0.1:11211',
#     },
# }

CACHES = {
    'default': {
@ -170,19 +173,19 @@ CACHES = {
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
# EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'

# Configure these for your outgoing email host
# EMAIL_HOST = 'smtp.my-company.com'
# EMAIL_PORT = 25
# EMAIL_HOST_USER = 'djangomail'
# EMAIL_HOST_PASSWORD = 'top-secret!'

# For multiple regions uncomment this configuration, and add (endpoint, title).
# AVAILABLE_REGIONS = [
#     ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
#     ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
# ]

OPENSTACK_HOST = "127.0.0.1"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
@ -191,15 +194,15 @@ OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
# For setting the default service region on a per-endpoint basis. Note that the
# default value for this setting is {}, and below is just an example of how it
# should be specified.
# DEFAULT_SERVICE_REGIONS = {
#     OPENSTACK_KEYSTONE_URL: 'RegionOne'
# }

# Enables keystone web single-sign-on if set to True.
# WEBSSO_ENABLED = False

# Determines which authentication choice to show as default.
# WEBSSO_INITIAL_CHOICE = "credentials"

# The list of authentication mechanisms which include keystone
# federation protocols and identity provider/federation protocol
@ -209,13 +212,13 @@ OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
# Do not remove the mandatory credentials mechanism.
# Note: The last two tuples are sample mapping keys to an identity provider
# and federation protocol combination (WEBSSO_IDP_MAPPING).
# WEBSSO_CHOICES = (
#     ("credentials", _("Keystone Credentials")),
#     ("oidc", _("OpenID Connect")),
#     ("saml2", _("Security Assertion Markup Language")),
#     ("acme_oidc", "ACME - OpenID Connect"),
#     ("acme_saml2", "ACME - SAML2"),
# )

# A dictionary of specific identity provider and federation protocol
# combinations. From the selected authentication mechanism, the value
@ -224,24 +227,24 @@ OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
# specific WebSSO endpoint in keystone, otherwise it will use the value
# as the protocol_id when redirecting to the WebSSO by protocol endpoint.
# NOTE: The value is expected to be a tuple formatted as: (<idp_id>, <protocol_id>).
# WEBSSO_IDP_MAPPING = {
#     "acme_oidc": ("acme", "oidc"),
#     "acme_saml2": ("acme", "saml2"),
# }

# The Keystone Provider drop down uses Keystone to Keystone federation
# to switch between Keystone service providers.
# Set display name for Identity Provider (dropdown display name)
# KEYSTONE_PROVIDER_IDP_NAME = "Local Keystone"
# This id is used only for comparison with the service provider IDs. This ID
# should not match any service provider IDs.
# KEYSTONE_PROVIDER_IDP_ID = "localkeystone"

# Disable SSL certificate checks (useful for self-signed certificates):
# OPENSTACK_SSL_NO_VERIFY = True

# The CA certificate to use to verify SSL connections
# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'

# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
# capabilities of the auth backend for Keystone.
@ -260,12 +263,12 @@ OPENSTACK_KEYSTONE_BACKEND = {

# Setting this to True, will add a new "Retrieve Password" action on instance,
# allowing Admin session password retrieval/decryption.
# OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False

# This setting allows deployers to control whether a token is deleted on log
# out. This can be helpful when there are often long running processes being
# run in the Horizon environment.
# TOKEN_DELETION_DISABLED = False

# The Launch Instance user experience has been significantly enhanced.
# You can choose whether to enable the new launch instance experience,
@ -278,12 +281,12 @@ OPENSTACK_KEYSTONE_BACKEND = {
# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
# determine the experience to enable. Set them both to true to enable
# both.
# LAUNCH_INSTANCE_LEGACY_ENABLED = True
# LAUNCH_INSTANCE_NG_ENABLED = False

# A dictionary of settings which can be used to provide the default values for
# properties found in the Launch Instance modal.
# LAUNCH_INSTANCE_DEFAULTS = {
#     'config_drive': False,
#     'enable_scheduler_hints': True,
#     'disable_image': False,
@ -291,7 +294,7 @@ OPENSTACK_KEYSTONE_BACKEND = {
#     'disable_volume': False,
#     'disable_volume_snapshot': False,
#     'create_volume': True,
# }

# The Xen Hypervisor has the ability to set the mount point for volumes
# attached to instances (other Hypervisors currently do not). Setting
@ -374,7 +377,7 @@ OPENSTACK_HEAT_STACK = {
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
# OPENSTACK_IMAGE_BACKEND = {
#     'image_formats': [
#         ('', _('Select format')),
#         ('aki', _('AKI - Amazon Kernel Image')),
@ -390,7 +393,7 @@ OPENSTACK_HEAT_STACK = {
#         ('vhdx', _('VHDX - Large Virtual Hard Disk')),
#         ('vmdk', _('VMDK - Virtual Machine Disk')),
#     ],
# }

# The IMAGE_CUSTOM_PROPERTY_TITLES setting is used to customize the titles for
# image custom property attributes that appear on image detail pages.
@ -412,29 +415,29 @@ IMAGE_RESERVED_CUSTOM_PROPERTIES = []
# Horizon server. When enabled, a file form field will appear on the create
# image form. If set to 'off', there will be no file form field on the create
# image form. See documentation for deployment considerations.
# HORIZON_IMAGES_UPLOAD_MODE = 'legacy'

# Allow a location to be set when creating or updating Glance images.
# If using Glance V2, this value should be False unless the Glance
# configuration and policies allow setting locations.
# IMAGES_ALLOW_LOCATION = False

# A dictionary of default settings for create image modal.
# CREATE_IMAGE_DEFAULTS = {
#     'image_visibility': "public",
# }

# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'publicURL'.
# OPENSTACK_ENDPOINT_TYPE = "publicURL"

# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is None. This
# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
# SECONDARY_ENDPOINT_TYPE = None

# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
@ -461,24 +464,24 @@ TIME_ZONE = "UTC"
# can provide a custom callback method to use for sorting. You can also provide
# a flag for reverse sort. For more info, see
# http://docs.python.org/2/library/functions.html#sorted
# CREATE_INSTANCE_FLAVOR_SORT = {
#     'key': 'name',
#     # or
#     'key': my_awesome_callback_method,
#     'reverse': False,
# }

# Set this to True to display an 'Admin Password' field on the Change Password
# form to verify that it is indeed the admin logged-in who wants to change
# the password.
# ENFORCE_PASSWORD_CHECK = False

# Modules that provide /auth routes that can be used to handle different types
# of user authentication. Add auth plugins that require extra route handling to
# this list.
# AUTHENTICATION_URLS = [
#     'openstack_auth.urls',
# ]

# The Horizon Policy Enforcement engine uses these values to load per service
# policy rule files. The content of these files should match the files the
@ -486,7 +489,7 @@ TIME_ZONE = "UTC"
# target installation.

# Path to directory containing policy.json files
# POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")

# Map of local copy of service policy files.
# Please ensure that your identity policy file matches the one being used on
@ -498,14 +501,14 @@ TIME_ZONE = "UTC"
# policy.v3cloudsample.json
# Having matching policy files on the Horizon and Keystone servers is essential
# for normal operation. This holds true for all services and their policy files.
# POLICY_FILES = {
#     'identity': 'keystone_policy.json',
#     'compute': 'nova_policy.json',
#     'volume': 'cinder_policy.json',
#     'image': 'glance_policy.json',
#     'orchestration': 'heat_policy.json',
#     'network': 'neutron_policy.json',
# }

# TODO: (david-lyle) remove when plugins support adding settings.
# Note: Only used when trove-dashboard plugin is configured to be used by
@ -514,16 +517,16 @@ TIME_ZONE = "UTC"
# creating users and databases on database instances is turned on.
# To disable these extensions set the permission here to something
# unusable such as ["!"].
# TROVE_ADD_USER_PERMS = []
# TROVE_ADD_DATABASE_PERMS = []

# Change this patch to the appropriate list of tuples containing
# a key, label and static directory containing two files:
# _variables.scss and _styles.scss
# AVAILABLE_THEMES = [
#     ('default', 'Default', 'themes/default'),
#     ('material', 'Material', 'themes/material'),
# ]

LOGGING = {
    'version': 1,
@ -780,13 +783,13 @@ SECURITY_GROUP_RULES = {
# pool for use in their cluster. False by default. You would want
# to set this to True if you were running Nova Networking with
# auto_assign_floating_ip = True.
# SAHARA_AUTO_IP_ALLOCATION_ENABLED = False

# The hash algorithm to use for authentication tokens. This must
# match the hash algorithm that the identity server and the
# auth_token middleware are using. Allowed values are the
# algorithms supported by Python's hashlib library.
# OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'

# AngularJS requires some settings to be made available to
# the client side. Some settings are required by in-tree / built-in horizon
@ -811,7 +814,7 @@ REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
# !! Please use extreme caution as the settings are transferred via HTTP/S
# and are not encrypted on the browser. This is an experimental API and
# may be deprecated in the future without notice.
# REST_API_ADDITIONAL_SETTINGS = []

# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded
# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame
@ -819,11 +822,11 @@ REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
# where iframes are not used in deployment. Default setting is True.
# For more information see:
# http://tinyurl.com/anticlickjack
# DISALLOW_IFRAME_EMBED = True

# Help URL can be made available for the client. To provide a help URL, edit the
# following attribute to the URL of your choice.
# HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org"

# Settings for OperationLogMiddleware
# OPERATION_LOG_ENABLED is a flag that enables logging of an operation on
@ -831,8 +834,8 @@ REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
# mask_targets specifies which request fields to mask in the log.
# method_targets lists the HTTP methods whose operations are logged.
# format is the log contents.
# OPERATION_LOG_ENABLED = False
# OPERATION_LOG_OPTIONS = {
#     'mask_fields': ['password'],
#     'target_methods': ['POST'],
#     'ignored_urls': ['/js/', '/static/', '^/api/'],
@ -841,19 +844,19 @@ REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
#     " [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]"
#     " [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]"
#     " [%(http_status)s] [%(param)s]"),
# }

# The default date range in the Overview panel meters - either <today> minus N
# days (if the value is integer N), or from the beginning of the current month
# until today (if set to None). This setting should be used to limit the amount
# of data fetched by default when rendering the Overview panel.
# OVERVIEW_DAYS_RANGE = 1

# To allow operators to require users provide a search criteria first
# before loading any data into the views, set the following dict
# attributes to True in each one of the panels you want to enable this feature.
# Follow the convention <dashboard>.<view>
# FILTER_DATA_FIRST = {
#     'admin.instances': False,
#     'admin.images': False,
#     'admin.networks': False,
@ -863,7 +866,7 @@ REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
#     'identity.projects': False,
#     'identity.groups': False,
#     'identity.roles': False
# }

# Dict used to restrict user private subnet cidr range.
# An empty list means that user input will not be restricted
@ -871,10 +874,10 @@ REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
# no restriction for IPv4 or IPv6. To restrict
# user private subnet cidr range set ALLOWED_PRIVATE_SUBNET_CIDR
# to something like
# ALLOWED_PRIVATE_SUBNET_CIDR = {
#     'ipv4': ['10.0.0.0/8', '192.168.0.0/16'],
#     'ipv6': ['fc00::/7']
# }
ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
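
A hypothetical helper showing the kind of check the dict above implies (illustrative only; Horizon's actual enforcement lives in its network forms, and subnet_of() needs Python 3.7+):

import ipaddress

def cidr_allowed(cidr, allowed=ALLOWED_PRIVATE_SUBNET_CIDR):
    # Empty lists mean "no restriction", matching the comment above.
    key = 'ipv6' if ':' in cidr else 'ipv4'
    nets = allowed.get(key, [])
    if not nets:
        return True
    net = ipaddress.ip_network(cidr)
    return any(net.subnet_of(ipaddress.ip_network(n)) for n in nets)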

# Projects and users can have extra attributes as defined by keystone v3.
@ -882,12 +885,12 @@ ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
# If you'd like to display extra data in the project or user tables, set the
# corresponding dict key to the attribute name, followed by the display name.
# For more information, see horizon's customization (http://docs.openstack.org/developer/horizon/topics/customizing.html#horizon-customization-module-overrides)
# PROJECT_TABLE_EXTRA_INFO = {
#     'phone_num': _('Phone Number'),
# }
# USER_TABLE_EXTRA_INFO = {
#     'phone_num': _('Phone Number'),
# }

# Password will have an expiration date when using keystone v3 and enabling the
# feature.
@ -895,22 +898,8 @@ ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
# prior to the password expiration.
# Once the password expires keystone will deny the access and users must
# contact an admin to change their password.
# PASSWORD_EXPIRES_WARNING_THRESHOLD_DAYS = 0
ALLOWED_HOSTS = ["*"]

HORIZON_CONFIG["password_autocomplete"] = "off"

# The OPENSTACK_HEAT_STACK settings can be used to disable password
@ -921,7 +910,7 @@ OPENSTACK_HEAT_STACK = {

OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_API_VERSIONS = {"identity": 3}

OPENSTACK_NEUTRON_NETWORK['enable_distributed_router'] = True

@ -936,7 +925,7 @@ try:

    OPENSTACK_HOST = configss.CONFSS['shared_services']['openstack_host']
    OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
    AVAILABLE_REGIONS = [(OPENSTACK_KEYSTONE_URL, configss.CONFSS['shared_services']['region_name'])]
    REGION_NAME = configss.CONFSS['shared_services']['region_name']
    SS_ENABLED = "True"
else:
@ -960,8 +949,8 @@ except Exception:

# check if it is in distributed cloud
DC_MODE = False
if distributed_cloud_role and distributed_cloud_role in ['systemcontroller', 'subcloud']:
    DC_MODE = True

OPENSTACK_ENDPOINT_TYPE = "internalURL"

@ -981,19 +970,17 @@ POLICY_FILES_PATH = "/etc/openstack-dashboard"
# Settings for OperationLogMiddleware
OPERATION_LOG_ENABLED = True
OPERATION_LOG_OPTIONS = {
    'mask_fields': ['password', 'bm_password', 'bm_confirm_password',
                    'current_password', 'confirm_password', 'new_password'],
    'target_methods': ['POST', 'PUT', 'DELETE'],
    'format': ("[%(project_name)s %(project_id)s] [%(user_name)s %(user_id)s]"
               " [%(method)s %(request_url)s %(http_status)s]"
               " parameters:[%(param)s] message:[%(message)s]"),
}
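
With these options, one emitted audit line would look roughly like this (all values made up for illustration):

# [admin 8f3b2c1d] [operator 51] [POST /project/instances/ 200]
#  parameters:[{'name': 'vm1'}] message:[success]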


# To be deprecated
HORIZON_CONFIG["help_url"] = "http://www.windriver.com/support/"
SITE_BRANDING = "StarlingX"

# Note (Eddie Ramirez): The theme name will be updated after r0
AVAILABLE_THEMES = [
@ -1005,7 +992,10 @@ DEFAULT_THEME = 'titanium'

for root, dirs, files in os.walk('/opt/branding/applied'):
    if 'manifest.py' in files:
        with open(os.path.join(root, 'manifest.py')) as f:
            code = compile(f.read(), os.path.join(root, 'manifest.py'), 'exec')
            exec(code)

AVAILABLE_THEMES = [
    ('default', 'Default', 'themes/default'),
    ('material', 'Material', 'themes/material'),
@ -1027,11 +1017,11 @@ try:
    if os.path.exists('/etc/openstack-dashboard/horizon-config.ini'):
        if not configss.CONFSS or 'horizon_params' not in configss.CONFSS:
            configss.load('/etc/openstack-dashboard/horizon-config.ini')

        if configss.CONFSS['horizon_params']['https_enabled'] == 'true':
            CSRF_COOKIE_SECURE = True
            SESSION_COOKIE_SECURE = True

        if configss.CONFSS['auth']['lockout_period']:
            LOCKOUT_PERIOD_SEC = float(configss.CONFSS['auth']['lockout_period'])
        if configss.CONFSS['auth']['lockout_retries']:
@ -1192,6 +1182,3 @@ LOGGING = {
        },
    },
}

@ -212,9 +212,6 @@ install -p -D -m 755 tools/sample_data.sh %{buildroot}%{_datadir}/keystone/sampl
# Install apache configuration files
install -p -D -m 644 httpd/wsgi-keystone.conf %{buildroot}%{_datadir}/keystone/

# WRS: install policy rules
install -p -D -m 640 etc/policy.wrs.json %{buildroot}%{_sysconfdir}/keystone/policy.json

# WRS install keystone cron script
install -p -D -m 755 %{SOURCE101} %{buildroot}%{_bindir}/keystone-fernet-keys-rotate-active

@ -282,7 +279,6 @@ exit 0
%config(noreplace) %attr(0640, root, keystone) %{_sysconfdir}/keystone/logging.conf
%config(noreplace) %attr(0640, root, keystone) %{_sysconfdir}/keystone/default_catalog.templates
%config(noreplace) %attr(0640, keystone, keystone) %{_sysconfdir}/keystone/keystone.policy.yaml
%config(noreplace) %attr(0640, keystone, keystone) %{_sysconfdir}/keystone/policy.json
%config(noreplace) %attr(0640, keystone, keystone) %{_sysconfdir}/keystone/sso_callback_template.html
# WRS: add password rules configuration
%attr(0440, root, keystone) %{_sysconfdir}/keystone/password-rules.conf

@ -11,6 +11,7 @@ if command is None:
syslog.openlog('nova_migration_wrapper')


def allow_command(user, args):
    syslog.syslog(syslog.LOG_INFO, "Allowing connection='{}' command={} ".format(
        ssh_connection,
@ -18,6 +19,7 @@ def allow_command(user, args):
    ))
    os.execlp('sudo', 'sudo', '-u', user, *args)


def deny_command(args):
    syslog.syslog(syslog.LOG_ERR, "Denying connection='{}' command={}".format(
        ssh_connection,
@ -26,13 +28,14 @@ def deny_command(args):
    sys.stderr.write('Forbidden\n')
    sys.exit(1)


# Handle libvirt ssh tunnel script snippet
# https://github.com/libvirt/libvirt/blob/f0803dae93d62a4b8a2f67f4873c290a76d978b3/src/rpc/virnetsocket.c#L890
libvirt_sock = '/var/run/libvirt/libvirt-sock'
live_migration_tunnel_cmd = "sh -c 'if 'nc' -q 2>&1 | grep \"requires an argument\" >/dev/null 2>&1; then " \
                            " ARG=-q0;" \
                            "else " \
                            " ARG=;" \
                            "fi;" \
                            "'nc' $ARG -U {}'".format(libvirt_sock)

@ -49,11 +52,13 @@ cold_migration_cmds = [
]
rootwrap_args = ['/usr/bin/nova-rootwrap', '/etc/nova/migration/rootwrap.conf']


def validate_cold_migration_cmd(args):
    target_path = os.path.normpath(args[-1])
    cmd = args[:-1]
    return cmd in cold_migration_cmds and target_path.startswith(cold_migration_root)
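
A hypothetical call, assuming cold_migration_root is '/var/lib/nova/instances/' and the rootwrap-prefixed mkdir is whitelisted in cold_migration_cmds (neither is shown in this hunk):

# Illustrative only; the command list and root path are assumptions.
args = ['/usr/bin/nova-rootwrap', '/etc/nova/migration/rootwrap.conf',
        'mkdir', '-p', '/var/lib/nova/instances/abc-123']
# True when the command prefix is whitelisted and the normalized
# target path stays under cold_migration_root.
validate_cold_migration_cmd(args)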

# Rules
args = command.split(' ')
if command == live_migration_tunnel_cmd:

@ -1,5 +1,5 @@
#!/usr/bin/python2
# PBR Generated from u'wsgi_scripts'

import threading

@ -61,4 +61,3 @@ else:
    with app_lock:
        if application is None:
            application = init_application()

@ -16,7 +16,9 @@ import logging

# logger
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.getLogger('multiprocessing').setLevel(logging.DEBUG)
LOG = logging.getLogger(__name__)

@ -82,6 +84,7 @@ def get_l3_cache_allocation_info():
            _L3_CACHE[cache]['num_cbm_bits'] = None
    return _L3_CACHE


def get_l3_cache_allocation_schemata(uuid=None):
    """Get resctrl L3 cache allocation technology schemata CBM corresponding
    to instance uuid, or the default schemata if uuid not provided.
@ -136,6 +139,7 @@ def get_l3_cache_allocation_schemata(uuid=None):

    return schemata


def get_all_l3_schemata():
    """Get L3 CLOS schemata CBM for all resctrl uuids.
    :param: None
@ -238,8 +242,8 @@ def print_all_instance_schematas(l3_info=None, default_schemata=None, schematas=
    closids_used = 1 + len(schematas)

    print('%6s %4s : %*s : %8s : %20s : %4s : %s'
          % ('cache', 'bank', uuid_len, 'uuid',
             'CBM', 'bitarray', 'size', 'setbits'))
    for cache_type in cache_types:
        for bank in banks:
            default_s = hextoset(mask=default_schemata[cache_type][bank])
@ -247,12 +251,12 @@ def print_all_instance_schematas(l3_info=None, default_schemata=None, schematas=
            default_d = int(default_h, 16)
            name = 'default'
            print('%6s %4d : %*s : %08x : %s : %4d : %s'
                  % (cache_type, bank, uuid_len, name, default_d,
                     format(default_d, '020b'), bin(default_d).count('1'),
                     list_to_range(input_list=default_s)))

            for name, schemata in sorted(schematas.items(),
                                         key=lambda x: msb(int(x[1][cache_type][bank], 16))):

                if schemata[cache_type][bank] == cbm_mask:
                    cbm_s = set()
@ -261,11 +265,12 @@ def print_all_instance_schematas(l3_info=None, default_schemata=None, schematas=
                    cbm_h = settohex(setbits=cbm_s)
                    cbm_d = int(cbm_h, 16)
                    print('%6s %4d : %s : %08x : %s : %4d : %s'
                          % (cache_type, bank, name, cbm_d,
                             format(cbm_d, '020b'), bin(cbm_d).count('1'),
                             list_to_range(input_list=cbm_s) or '-'))
    print('CLOSIDS/type: %d total, %d used' % (closids_total, closids_used))


def main():
    l3_info = get_l3_cache_allocation_info()
    if not _L3_RESCTRL_SUPPORT:
@ -276,6 +281,7 @@ def main():
                                  default_schemata=default_schemata,
                                  schematas=schematas)


if __name__ == '__main__':
    main()
    sys.exit(0)
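
The sort key above uses an msb() helper defined outside this hunk; a plausible minimal definition (an assumption, not necessarily the module's actual code):

def msb(x):
    # Index of the most significant set bit, e.g. msb(0x8) == 3;
    # bit_length() is 0 for x == 0, so this returns -1 in that case.
    return x.bit_length() - 1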

@ -38,7 +38,7 @@ find /mnt/huge-1048576kB/|xargs ls -ld >> ${logfile} 2>> ${logfile}

echo "Locked smaps" >> ${logfile}
echo "------------" >> ${logfile}
grep Locked: /proc/*/smaps 2>/dev/null | awk '($2 > 0) {a[$1]+=$2} END {for (i in a) print i,a[i]/1024.0, "MiB";}' >> ${logfile} 2>> ${logfile}

date '+%F %T' >> ${logfile} 2>> ${logfile}

@ -1,3 +1,4 @@
bashate >= 0.2
PyYAML >= 3.1.0
yamllint >= 0.5.2
flake8 >= 2.5.4 # MIT
tox.ini
@ -1,5 +1,5 @@
[tox]
envlist = linters,pep8
minversion = 2.3
skipsdist = True

@ -14,6 +14,8 @@ deps = -r{toxinidir}/test-requirements.txt

[testenv:linters]
whitelist_externals = bash
#bashate ignore
#E006 Line too long
commands =
  bash -c "find {toxinidir} \
    -not \( -type d -name .?\* -prune \) \
@ -21,20 +23,32 @@ commands =
    -not -name \*~ \
    -not -name \*.md \
    -name \*.sh \
    -print0 | xargs -0 bashate -v -i E006"
  bash -c "find {toxinidir} \
    \( -path '{toxinidir}/openstack/python-heat/python-heat/templates*' \
    -o -path '{toxinidir}/.tox' \) -a -prune \
    -o -name '*.yaml' \
    -print0 | xargs -0 yamllint"

[testenv:pep8]
usedevelop = False
skip_install = True
deps =
  pep8
description =
  Run style checks.

commands =
  flake8

[flake8]
# E123, E125 skipped as they are invalid PEP-8.
# E501 skipped because some of the code files include templates
# that end up quite wide
# H405: multi line docstring summary not separated with an empty line
show-source = True
ignore = E123,E125,E501,H405
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,release-tag-*

[testenv:venv]
commands = {posargs}
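
With pep8 added to envlist, the style environment can be run on its own as "tox -e pep8", while a bare "tox" now runs both the linters and pep8 environments.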