SysInv Decoupling: Create Inventory Service
Create host inventory services (api, conductor and agent) and
python-inventoryclient. The inventory service collects the host
resources and provides a REST API and a client to expose them. A
plugin is created for integration with the system configuration
(sysinv) service.

This is the initial inventory service infrastructure commit. Puppet
configuration, SM integration and host integration with
sysinv (systemconfig) are pending and planned for future commits.

Tests performed:

Verify the changes are inert on config_controller installation and
provisioning. Puppet and spec changes are required in order to create
keystone and database resources and to activate the inventory
services.

Unit tests performed (when puppet configuration for keystone and
database is applied):

* Trigger host configure_check and configure signals into
  systemconfig (sysinv).
* Verify python-inventoryclient and the api service (disks and
  related storage resources are pending):

    inventory host-cpu-list/show
    inventory host-device-list/show/modify
    inventory host-ethernetport-list/show
    inventory host-lldp-neighbor-list
    inventory host-lldp-agent-list/show
    inventory host-memory-list/show
    inventory host-node-list/show
    inventory host-port-list/show

Tox unit tests:

* inventory: pep8
* python-inventoryclient: py27, pep8, cover, pylint

Change-Id: I744ac0de098608c55b9356abf180cc36601cfb8d
Story: 2002950
Task: 22952
Signed-off-by: John Kung <john.kung@windriver.com>
parent a92c543fd5
commit bd998017d5
@@ -23,3 +23,7 @@ pxe-network-installer
# platform-kickstarts
platform-kickstarts

# inventory
inventory
python-inventoryclient
@@ -5,3 +5,5 @@ mtce-control
mtce-storage
installer/pxe-network-installer
kickstart
inventory
python-inventoryclient
inventory/PKG-INFO (new file, 13 lines)
@@ -0,0 +1,13 @@
Metadata-Version: 1.1
Name: inventory
Version: 1.0
Summary: Inventory
Home-page: https://wiki.openstack.org/wiki/StarlingX
Author: StarlingX
Author-email: starlingx-discuss@lists.starlingx.io
License: Apache-2.0

Description: Inventory Service


Platform: UNKNOWN
inventory/centos/build_srpm.data (new file, 2 lines)
@@ -0,0 +1,2 @@
SRC_DIR="inventory"
TIS_PATCH_VER=1
inventory/centos/inventory.spec (new file, 195 lines)
@@ -0,0 +1,195 @@
Summary: Inventory
Name: inventory
Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist}
License: Apache-2.0
Group: base
Packager: Wind River <info@windriver.com>
URL: unknown
Source0: %{name}-%{version}.tar.gz

BuildRequires: cgts-client
BuildRequires: python-setuptools
BuildRequires: python-jsonpatch
BuildRequires: python-keystoneauth1
BuildRequires: python-keystonemiddleware
BuildRequires: python-mock
BuildRequires: python-neutronclient
BuildRequires: python-oslo-concurrency
BuildRequires: python-oslo-config
BuildRequires: python-oslo-context
BuildRequires: python-oslo-db
BuildRequires: python-oslo-db-tests
BuildRequires: python-oslo-i18n
BuildRequires: python-oslo-log
BuildRequires: python-oslo-messaging
BuildRequires: python-oslo-middleware
BuildRequires: python-oslo-policy
BuildRequires: python-oslo-rootwrap
BuildRequires: python-oslo-serialization
BuildRequires: python-oslo-service
BuildRequires: python-oslo-utils
BuildRequires: python-oslo-versionedobjects
BuildRequires: python-oslotest
BuildRequires: python-osprofiler
BuildRequires: python-os-testr
BuildRequires: python-pbr
BuildRequires: python-pecan
BuildRequires: python-psutil
BuildRequires: python-requests
BuildRequires: python-retrying
BuildRequires: python-six
BuildRequires: python-sqlalchemy
BuildRequires: python-stevedore
BuildRequires: python-webob
BuildRequires: python-wsme
BuildRequires: systemd
BuildRequires: systemd-devel


Requires: python-pyudev
Requires: pyparted
Requires: python-ipaddr
Requires: python-paste
Requires: python-eventlet
Requires: python-futurist >= 0.11.0
Requires: python-jsonpatch
Requires: python-keystoneauth1 >= 3.1.0
Requires: python-keystonemiddleware >= 4.12.0
Requires: python-neutronclient >= 6.3.0
Requires: python-oslo-concurrency >= 3.8.0
Requires: python-oslo-config >= 2:4.0.0
Requires: python-oslo-context >= 2.14.0
Requires: python-oslo-db >= 4.24.0
Requires: python-oslo-i18n >= 2.1.0
Requires: python-oslo-log >= 3.22.0
Requires: python-oslo-messaging >= 5.24.2
Requires: python-oslo-middleware >= 3.27.0
Requires: python-oslo-policy >= 1.23.0
Requires: python-oslo-rootwrap >= 5.0.0
Requires: python-oslo-serialization >= 1.10.0
Requires: python-oslo-service >= 1.10.0
Requires: python-oslo-utils >= 3.20.0
Requires: python-oslo-versionedobjects >= 1.17.0
Requires: python-osprofiler >= 1.4.0
Requires: python-pbr
Requires: python-pecan
Requires: python-psutil
Requires: python-requests
Requires: python-retrying
Requires: python-six
Requires: python-sqlalchemy
Requires: python-stevedore >= 1.20.0
Requires: python-webob >= 1.7.1
Requires: python-wsme

%description
Inventory Service

%define local_bindir /usr/bin/
%define local_etc_goenabledd /etc/goenabled.d/
%define local_etc_inventory /etc/inventory/
%define local_etc_motdd /etc/motd.d/
%define pythonroot /usr/lib64/python2.7/site-packages
%define ocf_resourced /usr/lib/ocf/resource.d

%define local_etc_initd /etc/init.d/
%define local_etc_pmond /etc/pmon.d/

%define debug_package %{nil}

%prep
%setup

# Remove bundled egg-info
rm -rf *.egg-info

%build
echo "Start inventory build"
export PBR_VERSION=%{version}
%{__python} setup.py build
PYTHONPATH=. oslo-config-generator --config-file=inventory/config-generator.conf

%install
echo "Start inventory install"
export PBR_VERSION=%{version}
%{__python} setup.py install --root=%{buildroot} \
    --install-lib=%{pythonroot} \
    --prefix=/usr \
    --install-data=/usr/share \
    --single-version-externally-managed

install -d -m 755 %{buildroot}%{local_etc_goenabledd}
install -p -D -m 755 etc/inventory/inventory_goenabled_check.sh %{buildroot}%{local_etc_goenabledd}/inventory_goenabled_check.sh

install -d -m 755 %{buildroot}%{local_etc_inventory}
install -p -D -m 755 etc/inventory/policy.json %{buildroot}%{local_etc_inventory}/policy.json

install -d -m 755 %{buildroot}%{local_etc_motdd}
install -p -D -m 755 etc/inventory/motd-system %{buildroot}%{local_etc_motdd}/10-system-config

install -m 755 -p -D scripts/inventory-api %{buildroot}/usr/lib/ocf/resource.d/platform/inventory-api
install -m 755 -p -D scripts/inventory-conductor %{buildroot}/usr/lib/ocf/resource.d/platform/inventory-conductor

install -m 644 -p -D scripts/inventory-api.service %{buildroot}%{_unitdir}/inventory-api.service
install -m 644 -p -D scripts/inventory-conductor.service %{buildroot}%{_unitdir}/inventory-conductor.service

# TODO(jkung) activate inventory-agent with puppet integration
# install -d -m 755 %{buildroot}%{local_etc_initd}
# install -p -D -m 755 scripts/inventory-agent-initd %{buildroot}%{local_etc_initd}/inventory-agent

# install -d -m 755 %{buildroot}%{local_etc_pmond}
# install -p -D -m 644 etc/inventory/inventory-agent-pmond.conf %{buildroot}%{local_etc_pmond}/inventory-agent-pmond.conf
# install -p -D -m 644 scripts/inventory-agent.service %{buildroot}%{_unitdir}/inventory-agent.service

# Install sql migration
install -m 644 inventory/db/sqlalchemy/migrate_repo/migrate.cfg %{buildroot}%{pythonroot}/inventory/db/sqlalchemy/migrate_repo/migrate.cfg

# install default config files
cd %{_builddir}/%{name}-%{version} && oslo-config-generator --config-file inventory/config-generator.conf --output-file %{_builddir}/%{name}-%{version}/inventory.conf.sample
# install -p -D -m 644 %{_builddir}/%{name}-%{version}/inventory.conf.sample %{buildroot}%{_sysconfdir}/inventory/inventory.conf


# TODO(jkung) activate inventory-agent
# %post
# /usr/bin/systemctl enable inventory-agent.service >/dev/null 2>&1


%clean
echo "CLEAN CALLED"
rm -rf $RPM_BUILD_ROOT

%files
%defattr(-,root,root,-)
%doc LICENSE

%{local_bindir}/*

%{pythonroot}/%{name}

%{pythonroot}/%{name}-%{version}*.egg-info

%{local_etc_goenabledd}/*

%{local_etc_inventory}/*

%{local_etc_motdd}/*

# SM OCF Start/Stop/Monitor Scripts
%{ocf_resourced}/platform/inventory-api
%{ocf_resourced}/platform/inventory-conductor

# systemctl service files
%{_unitdir}/inventory-api.service
%{_unitdir}/inventory-conductor.service

# %{_bindir}/inventory-agent
%{_bindir}/inventory-api
%{_bindir}/inventory-conductor
%{_bindir}/inventory-dbsync
%{_bindir}/inventory-dnsmasq-lease-update

# inventory-agent files
# %{local_etc_initd}/inventory-agent
# %{local_etc_pmond}/inventory-agent-pmond.conf
# %{_unitdir}/inventory-agent.service
inventory/inventory/.coveragerc (new file, 6 lines)
@@ -0,0 +1,6 @@
[run]
branch = True
source = inventory

[report]
ignore_errors = True
inventory/inventory/.gitignore (new file, vendored, 59 lines)
@@ -0,0 +1,59 @@
*.py[cod]

# C extensions
*.so

# Packages
*.egg*
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64

# Installer logs
pip-log.txt

# Unit test / coverage reports
cover/
.coverage*
!.coveragerc
.tox
nosetests.xml
.testrepository
.stestr
.venv

# Translations
*.mo

# Mr Developer
.mr.developer.cfg
.project
.pydevproject

# Complexity
output/*.html
output/*/index.html

# Sphinx
doc/build

# pbr generates these
AUTHORS
ChangeLog

# Editors
*~
.*.swp
.*sw?

# Files created by releasenotes build
releasenotes/build
inventory/inventory/.mailmap (new file, 3 lines)
@@ -0,0 +1,3 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
inventory/inventory/.stestr.conf (new file, 3 lines)
@@ -0,0 +1,3 @@
[DEFAULT]
test_path=./inventory/tests
top_dir=./
inventory/inventory/CONTRIBUTING.rst (new file, 19 lines)
@@ -0,0 +1,19 @@
If you would like to contribute to the development of StarlingX, you must
follow the steps in this page:

https://wiki.openstack.org/wiki/StarlingX/Contribution_Guidelines

If you already have a good understanding of how the system works and your
StarlingX accounts are set up, you can skip to the development workflow
section of this documentation to learn how changes to StarlingX should be
submitted for review via the Gerrit tool:

http://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad:
https://bugs.launchpad.net/starlingx

Storyboard:
https://storyboard.openstack.org/#!/story/2002950
inventory/inventory/HACKING.rst (new file, 4 lines)
@@ -0,0 +1,4 @@
inventory Style Commandments
============================

Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
inventory/inventory/LICENSE (new file, 176 lines)
@@ -0,0 +1,176 @@
Apache License, Version 2.0, January 2004
http://www.apache.org/licenses/
[Standard Apache-2.0 license text, Sections 1 through 9: Definitions;
Grant of Copyright License; Grant of Patent License; Redistribution;
Submission of Contributions; Trademarks; Disclaimer of Warranty;
Limitation of Liability; Accepting Warranty or Additional Liability.]
inventory/inventory/README.rst (new file, 3 lines)
@@ -0,0 +1,3 @@
Placeholder to allow setup.py to work.
Removing this requires modifying the
setup.py manifest.
inventory/inventory/babel.cfg (new file, 2 lines)
@@ -0,0 +1,2 @@
[python: **.py]
inventory/inventory/doc/requirements.txt (new file, 4 lines)
@@ -0,0 +1,4 @@
sphinx!=1.6.6,!=1.6.7,>=1.6.2  # BSD
openstackdocstheme>=1.18.1  # Apache-2.0
# releasenotes
reno>=2.5.0  # Apache-2.0
inventory/inventory/doc/source/admin/index.rst (new file, 5 lines)
@@ -0,0 +1,5 @@
====================
Administrators guide
====================

Administrators guide of inventory.
inventory/inventory/doc/source/cli/index.rst (new file, 5 lines)
@@ -0,0 +1,5 @@
================================
Command line interface reference
================================

CLI reference of inventory.
inventory/inventory/doc/source/conf.py (new executable file, 82 lines)
@@ -0,0 +1,82 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'openstackdocstheme',
    # 'sphinx.ext.intersphinx',
]

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'inventory'
copyright = u'2018, StarlingX'

# openstackdocstheme options
repository_name = 'stx-metal'
bug_project = '22952'
bug_tag = ''
html_last_updated_fmt = '%Y-%m-%d %H:%M'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme = 'starlingxdocs'

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Developers', 'manual'),
]

# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
inventory/inventory/doc/source/configuration/index.rst (new file, 5 lines)
@@ -0,0 +1,5 @@
=============
Configuration
=============

Configuration of inventory.
@@ -0,0 +1,4 @@
============
Contributing
============
.. include:: ../../../CONTRIBUTING.rst
inventory/inventory/doc/source/contributor/index.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
=========================
Contributor Documentation
=========================

.. toctree::
   :maxdepth: 2

   contributing
inventory/inventory/doc/source/index.rst (new file, 30 lines)
@@ -0,0 +1,30 @@
.. inventory documentation master file, created by
   sphinx-quickstart on Tue Jul 9 22:26:36 2013.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

=========================================
Welcome to the documentation of inventory
=========================================

Contents:

.. toctree::
   :maxdepth: 2

   readme
   install/index
   library/index
   contributor/index
   configuration/index
   cli/index
   user/index
   admin/index
   reference/index

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
inventory/inventory/doc/source/install/common_configure.rst (new file, 10 lines)
@@ -0,0 +1,10 @@
2. Edit the ``/etc/inventory/inventory.conf`` file and complete the following
   actions:

   * In the ``[database]`` section, configure database access:

     .. code-block:: ini

        [database]
        ...
        connection = mysql+pymysql://inventory:INVENTORY_DBPASS@controller/inventory
@@ -0,0 +1,75 @@
Prerequisites
-------------

Before you install and configure the inventory service,
you must create a database, service credentials, and API endpoints.

#. To create the database, complete these steps:

   * Use the database access client to connect to the database
     server as the ``root`` user:

     .. code-block:: console

        $ mysql -u root -p

   * Create the ``inventory`` database:

     .. code-block:: none

        CREATE DATABASE inventory;

   * Grant proper access to the ``inventory`` database:

     .. code-block:: none

        GRANT ALL PRIVILEGES ON inventory.* TO 'inventory'@'localhost' \
          IDENTIFIED BY 'INVENTORY_DBPASS';
        GRANT ALL PRIVILEGES ON inventory.* TO 'inventory'@'%' \
          IDENTIFIED BY 'INVENTORY_DBPASS';

     Replace ``INVENTORY_DBPASS`` with a suitable password.

   * Exit the database access client.

     .. code-block:: none

        exit;

#. Source the ``admin`` credentials to gain access to
   admin-only CLI commands:

   .. code-block:: console

      $ . admin-openrc

#. To create the service credentials, complete these steps:

   * Create the ``inventory`` user:

     .. code-block:: console

        $ openstack user create --domain default --password-prompt inventory

   * Add the ``admin`` role to the ``inventory`` user:

     .. code-block:: console

        $ openstack role add --project service --user inventory admin

   * Create the inventory service entities:

     .. code-block:: console

        $ openstack service create --name inventory --description "inventory" inventory

#. Create the inventory service API endpoints:

   .. code-block:: console

      $ openstack endpoint create --region RegionOne \
        inventory public http://controller:XXXX/vY/%\(tenant_id\)s
      $ openstack endpoint create --region RegionOne \
        inventory internal http://controller:XXXX/vY/%\(tenant_id\)s
      $ openstack endpoint create --region RegionOne \
        inventory admin http://controller:XXXX/vY/%\(tenant_id\)s
inventory/inventory/doc/source/install/get_started.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
==========================
inventory service overview
==========================
The inventory service provides host inventory of resources on the host.

The inventory service consists of the following components:

``inventory-api`` service
  Accepts and responds to end user API calls...
inventory/inventory/doc/source/install/index.rst (new file, 17 lines)
@@ -0,0 +1,17 @@
====================================
inventory service installation guide
====================================

.. toctree::
   :maxdepth: 2

   get_started.rst
   install.rst
   verify.rst
   next-steps.rst

The inventory service (inventory) provides...

This chapter assumes a working setup of StarlingX following the
`StarlingX Installation Guide
<https://docs.starlingx.io/installation_guide/index.html>`_.
inventory/inventory/doc/source/install/install-obs.rst (new file, 34 lines)
@@ -0,0 +1,34 @@
.. _install-obs:


Install and configure for openSUSE and SUSE Linux Enterprise
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the inventory service
for openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1.

.. include:: common_prerequisites.rst

Install and configure components
--------------------------------

#. Install the packages:

   .. code-block:: console

      # zypper --quiet --non-interactive install

.. include:: common_configure.rst


Finalize installation
---------------------

Start the inventory services and configure them to start when
the system boots:

.. code-block:: console

   # systemctl enable openstack-inventory-api.service

   # systemctl start openstack-inventory-api.service
inventory/inventory/doc/source/install/install-rdo.rst (new file, 33 lines)
@@ -0,0 +1,33 @@
.. _install-rdo:

Install and configure for Red Hat Enterprise Linux and CentOS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


This section describes how to install and configure the inventory service
for Red Hat Enterprise Linux 7 and CentOS 7.

.. include:: common_prerequisites.rst

Install and configure components
--------------------------------

#. Install the packages:

   .. code-block:: console

      # yum install

.. include:: common_configure.rst

Finalize installation
---------------------

Start the inventory services and configure them to start when
the system boots:

.. code-block:: console

   # systemctl enable openstack-inventory-api.service

   # systemctl start openstack-inventory-api.service
inventory/inventory/doc/source/install/install-ubuntu.rst (new file, 31 lines)
@@ -0,0 +1,31 @@
.. _install-ubuntu:

Install and configure for Ubuntu
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the inventory
service for Ubuntu 14.04 (LTS).

.. include:: common_prerequisites.rst

Install and configure components
--------------------------------

#. Install the packages:

   .. code-block:: console

      # apt-get update

      # apt-get install

.. include:: common_configure.rst

Finalize installation
---------------------

Restart the inventory services:

.. code-block:: console

   # service openstack-inventory-api restart
inventory/inventory/doc/source/install/install.rst (new file, 20 lines)
@@ -0,0 +1,20 @@
.. _install:

Install and configure
~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the
inventory service, code-named inventory, on the controller node.

This section assumes that you already have a working OpenStack
environment with at least the following components installed:
.. (add the appropriate services here and further notes)

Note that installation and configuration vary by distribution.

.. toctree::
   :maxdepth: 2

   install-obs.rst
   install-rdo.rst
   install-ubuntu.rst
inventory/inventory/doc/source/install/next-steps.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
.. _next-steps:

Next steps
~~~~~~~~~~

Your OpenStack environment now includes the inventory service.

To add additional services, see
https://docs.openstack.org/project-install-guide/ocata/.
inventory/inventory/doc/source/install/verify.rst (new file, 24 lines)
@@ -0,0 +1,24 @@
.. _verify:

Verify operation
~~~~~~~~~~~~~~~~

Verify operation of the inventory service.

.. note::

   Perform these commands on the controller node.

#. Source the ``admin`` project credentials to gain access to
   admin-only CLI commands:

   .. code-block:: console

      $ . admin-openrc

#. List service components to verify successful launch and registration
   of each process:

   .. code-block:: console

      $ openstack inventory service list
inventory/inventory/doc/source/library/index.rst (new file, 7 lines)
@@ -0,0 +1,7 @@
=====
Usage
=====

To use inventory in a project:

    import inventory
inventory/inventory/doc/source/readme.rst (new file, 1 line)
@@ -0,0 +1 @@
.. include:: ../../README.rst
inventory/inventory/doc/source/reference/index.rst (new file, 5 lines)
@@ -0,0 +1,5 @@
==========
References
==========

References of inventory.
inventory/inventory/doc/source/user/index.rst (new file, 5 lines)
@@ -0,0 +1,5 @@
===========
Users guide
===========

Users guide of inventory.
inventory/inventory/etc/inventory/delete_load.sh (new file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/bash
# Copyright (c) 2015-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# This script removes a load from a controller.
# The load version is passed in as the first variable.

: ${1?"Usage $0 VERSION"}
VERSION=$1

FEED_DIR=/www/pages/feed/rel-$VERSION

rm -f /pxeboot/pxelinux.cfg.files/*-$VERSION
rm -rf /pxeboot/rel-$VERSION

rm -f /usr/sbin/pxeboot-update-$VERSION.sh

rm -rf $FEED_DIR
@@ -0,0 +1,9 @@
[process]
process = inventory-agent
pidfile = /var/run/inventory-agent.pid
script = /etc/init.d/inventory-agent
style = lsb          ; ocf or lsb
severity = major     ; minor, major, critical
restarts = 3         ; restarts before error assertion
interval = 5         ; number of seconds to wait between restarts
debounce = 20        ; number of seconds to wait before degrade clear
@@ -0,0 +1,36 @@
#!/bin/bash
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# Inventory "goenabled" check.
# Wait for inventory information to be posted prior to allowing goenabled.

NAME=$(basename $0)
INVENTORY_READY_FLAG=/var/run/.inventory_ready

# logfile=/var/log/platform.log

function LOG {
    logger "$NAME: $*"
    # echo "`date "+%FT%T"`: $NAME: $*" >> $logfile
}

count=0
while [ $count -le 45 ]; do
    if [ -f $INVENTORY_READY_FLAG ]; then
        LOG "Inventory is ready. Passing goenabled check."
        echo "Inventory goenabled iterations PASS $count"
        LOG "Inventory goenabled iterations PASS $count"
        exit 0
    fi
    sleep 1
    count=$(($count+1))
done

echo "Inventory goenabled iterations FAIL $count"

LOG "Inventory is not ready. Continue."
exit 0
inventory/inventory/etc/inventory/motd-system (new file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/bash
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# update inventory MOTD if motd.system content present

[ -f /etc/inventory/motd.system ] && cat /etc/inventory/motd.system || true
inventory/inventory/etc/inventory/policy.json (new file, 5 lines)
@@ -0,0 +1,5 @@
{
    "admin": "role:admin or role:administrator",
    "admin_api": "is_admin:True",
    "default": "rule:admin_api"
}
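For context, a minimal sketch (not part of the commit; the enforcer setup
here is an assumption) of how the rules in this policy.json evaluate under
oslo.policy:

    # Sketch only: evaluates the policy.json rules above with oslo.policy.
    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF, policy_file='policy.json')

    # "admin_api" passes when the caller's credentials carry is_admin=True.
    creds = {'roles': ['admin'], 'is_admin': True}
    print(enforcer.enforce('admin_api', {}, creds))  # True

    # Any action without a registered rule falls back to "default",
    # which delegates to "admin_api".
    print(enforcer.enforce('host:unregistered_action', {}, creds))  # True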
inventory/inventory/inventory/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import pbr.version


__version__ = pbr.version.VersionInfo(
    'inventory').version_string()
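As a quick hedged illustration (not part of the commit), pbr resolves the
version from the installed package metadata, so once the package is
installed the version string can be read back:

    # Sketch only: assumes the inventory package is installed so pbr can
    # resolve its version from package metadata / git tags.
    import inventory
    print(inventory.__version__)  # e.g. '1.0.0'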
inventory/inventory/inventory/agent/__init__.py (new empty file)
inventory/inventory/inventory/agent/base_manager.py (new file, 114 lines)
@@ -0,0 +1,114 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""Base agent manager functionality."""

import futurist
from futurist import periodics
from futurist import rejection
import inspect
from inventory.common import exception
from inventory.common.i18n import _
from oslo_config import cfg
from oslo_log import log

LOG = log.getLogger(__name__)


class BaseAgentManager(object):

    def __init__(self, host, topic):
        super(BaseAgentManager, self).__init__()
        if not host:
            host = cfg.CONF.host
        self.host = host
        self.topic = topic
        self._started = False

    def init_host(self, admin_context=None):
        """Initialize the agent host.

        :param admin_context: the admin context to pass to periodic tasks.
        :raises: RuntimeError when agent is already running.
        """
        if self._started:
            raise RuntimeError(_('Attempt to start an already running '
                                 'agent manager'))

        rejection_func = rejection.reject_when_reached(64)
        # CONF.conductor.workers_pool_size)
        self._executor = futurist.GreenThreadPoolExecutor(
            64, check_and_reject=rejection_func)
        # JK max_workers=CONF.conductor.workers_pool_size,
        """Executor for performing tasks async."""

        # Collect driver-specific periodic tasks.
        # Conductor periodic tasks accept context argument,
        LOG.info('Collecting periodic tasks')
        self._periodic_task_callables = []
        self._collect_periodic_tasks(self, (admin_context,))

        self._periodic_tasks = periodics.PeriodicWorker(
            self._periodic_task_callables,
            executor_factory=periodics.ExistingExecutor(self._executor))

        # Start periodic tasks
        self._periodic_tasks_worker = self._executor.submit(
            self._periodic_tasks.start, allow_empty=True)
        self._periodic_tasks_worker.add_done_callback(
            self._on_periodic_tasks_stop)

        self._started = True

    def del_host(self, deregister=True):
        # Conductor deregistration fails if called on non-initialized
        # agent (e.g. when rpc server is unreachable).
        if not hasattr(self, 'agent'):
            return

        self._periodic_tasks.stop()
        self._periodic_tasks.wait()
        self._executor.shutdown(wait=True)
        self._started = False

    def _collect_periodic_tasks(self, obj, args):
        """Collect periodic tasks from a given object.

        Populates self._periodic_task_callables with tuples
        (callable, args, kwargs).

        :param obj: object containing periodic tasks as methods
        :param args: tuple with arguments to pass to every task
        """
        for name, member in inspect.getmembers(obj):
            if periodics.is_periodic(member):
                LOG.debug('Found periodic task %(owner)s.%(member)s',
                          {'owner': obj.__class__.__name__,
                           'member': name})
                self._periodic_task_callables.append((member, args, {}))

    def _on_periodic_tasks_stop(self, fut):
        try:
            fut.result()
        except Exception as exc:
            LOG.critical('Periodic tasks worker has failed: %s', exc)
        else:
            LOG.info('Successfully shut down periodic tasks')

    def _spawn_worker(self, func, *args, **kwargs):

        """Create a greenthread to run func(*args, **kwargs).

        Spawns a greenthread if there are free slots in pool, otherwise raises
        exception. Execution control returns immediately to the caller.

        :returns: Future object.
        :raises: NoFreeConductorWorker if worker pool is currently full.

        """
        try:
            return self._executor.submit(func, *args, **kwargs)
        except futurist.RejectedSubmission:
            raise exception.NoFreeConductorWorker()
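For context, a minimal sketch of how a concrete manager would plug into the
base class above (the subclass and task body are hypothetical, not part of
the commit): any method decorated with futurist's periodics.periodic is
discovered by _collect_periodic_tasks during init_host() and scheduled on
the shared green-thread executor.

    # Sketch only: hypothetical subclass of the BaseAgentManager above.
    from futurist import periodics

    class ExampleAgentManager(BaseAgentManager):

        @periodics.periodic(spacing=60)
        def _audit_inventory(self, admin_context):
            # Invoked every 60 seconds by the PeriodicWorker started in
            # init_host(); receives the admin_context passed there.
            pass

    manager = ExampleAgentManager(host='controller-0',
                                  topic='inventory.agent')
    manager.init_host()  # collects and starts the periodic tasks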
369
inventory/inventory/inventory/agent/disk.py
Normal file
369
inventory/inventory/inventory/agent/disk.py
Normal file
@ -0,0 +1,369 @@
|
|||||||
|
#
|
||||||
|
# Copyright (c) 2013-2018 Wind River Systems, Inc.
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
|
||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
|
||||||
|
""" inventory idisk Utilities and helper functions."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import pyudev
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from inventory.common import constants
|
||||||
|
from inventory.common import context
|
||||||
|
from inventory.common import utils
|
||||||
|
from inventory.conductor import rpcapi as conductor_rpcapi
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class DiskOperator(object):
|
||||||
|
'''Class to encapsulate Disk operations for System Inventory'''
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
|
||||||
|
self.num_cpus = 0
|
||||||
|
self.num_nodes = 0
|
||||||
|
self.float_cpuset = 0
|
||||||
|
self.default_hugepage_size_kB = 0
|
||||||
|
self.total_memory_MiB = 0
|
||||||
|
self.free_memory_MiB = 0
|
||||||
|
self.total_memory_nodes_MiB = []
|
||||||
|
self.free_memory_nodes_MiB = []
|
||||||
|
self.topology = {}
|
||||||
|
|
||||||
|
def convert_range_string_to_list(self, s):
|
||||||
|
olist = []
|
||||||
|
s = s.strip()
|
||||||
|
if s:
|
||||||
|
for part in s.split(','):
|
||||||
|
if '-' in part:
|
||||||
|
a, b = part.split('-')
|
||||||
|
a, b = int(a), int(b)
|
||||||
|
olist.extend(range(a, b + 1))
|
||||||
|
else:
|
||||||
|
a = int(part)
|
||||||
|
olist.append(a)
|
||||||
|
olist.sort()
|
||||||
|
return olist
|
||||||
|
|
||||||
|
def get_rootfs_node(self):
|
||||||
|
cmdline_file = '/proc/cmdline'
|
||||||
|
device = None
|
||||||
|
|
||||||
|
with open(cmdline_file, 'r') as f:
|
||||||
|
for line in f:
|
||||||
|
for param in line.split():
|
||||||
|
params = param.split("=", 1)
|
||||||
|
if params[0] == "root":
|
||||||
|
if "UUID=" in params[1]:
|
||||||
|
key, uuid = params[1].split("=")
|
||||||
|
symlink = "/dev/disk/by-uuid/%s" % uuid
|
||||||
|
device = os.path.basename(os.readlink(symlink))
|
||||||
|
else:
|
||||||
|
device = os.path.basename(params[1])
|
||||||
|
|
||||||
|
if device is not None:
|
||||||
|
if constants.DEVICE_NAME_NVME in device:
|
||||||
|
re_line = re.compile(r'^(nvme[0-9]*n[0-9]*)')
|
||||||
|
else:
|
||||||
|
re_line = re.compile(r'^(\D*)')
|
||||||
|
match = re_line.search(device)
|
||||||
|
if match:
|
||||||
|
return os.path.join("/dev", match.group(1))
|
||||||
|
|
||||||
|
return
|
||||||
|
|
||||||
|
@utils.skip_udev_partition_probe
|
||||||
|
def get_disk_available_mib(self, device_node):
|
||||||
|
# Check that partition table format is GPT.
|
||||||
|
# Return 0 if not.
|
||||||
|
if not utils.disk_is_gpt(device_node=device_node):
|
||||||
|
LOG.debug("Format of disk node %s is not GPT." % device_node)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
pvs_command = '{} {}'.format('pvs | grep -w ', device_node)
|
||||||
|
pvs_process = subprocess.Popen(pvs_command, stdout=subprocess.PIPE,
|
||||||
|
shell=True)
|
||||||
|
pvs_output = pvs_process.stdout.read()
|
||||||
|
|
||||||
|
if pvs_output:
|
||||||
|
LOG.debug("Disk %s is completely used by a PV => 0 available mib."
|
||||||
|
% device_node)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
# Get sector size command.
|
||||||
|
sector_size_bytes_cmd = '{} {}'.format('blockdev --getss', device_node)
|
||||||
|
|
||||||
|
# Get total free space in sectors command.
|
||||||
|
avail_space_sectors_cmd = '{} {} {}'.format(
|
||||||
|
'sgdisk -p', device_node, "| grep \"Total free space\"")
|
||||||
|
|
||||||
|
# Get the sector size.
|
||||||
|
sector_size_bytes_process = subprocess.Popen(
|
||||||
|
sector_size_bytes_cmd, stdout=subprocess.PIPE, shell=True)
|
||||||
|
sector_size_bytes = sector_size_bytes_process.stdout.read().rstrip()
|
||||||
|
|
||||||
|
# Get the free space.
|
||||||
|
avail_space_sectors_process = subprocess.Popen(
|
||||||
|
avail_space_sectors_cmd, stdout=subprocess.PIPE, shell=True)
|
||||||
|
avail_space_sectors_output = avail_space_sectors_process.stdout.read()
|
||||||
|
avail_space_sectors = re.findall(
|
||||||
|
'\d+', avail_space_sectors_output)[0].rstrip()
|
||||||
|
|
||||||
|
# Free space in MiB.
|
||||||
|
avail_space_mib = (int(sector_size_bytes) * int(avail_space_sectors) /
|
||||||
|
(1024 ** 2))
|
||||||
|
|
||||||
|
# Keep 2 MiB for partition table.
|
||||||
|
if avail_space_mib >= 2:
|
||||||
|
avail_space_mib = avail_space_mib - 2
|
||||||
|
else:
|
||||||
|
avail_space_mib = 0
|
||||||
|
|
||||||
|
return avail_space_mib
|
||||||
|
|
||||||
|
    def disk_format_gpt(self, host_uuid, idisk_dict, is_cinder_device):
        disk_node = idisk_dict.get('device_path')

        utils.disk_wipe(disk_node)
        utils.execute('parted', disk_node, 'mklabel', 'gpt')

        if is_cinder_device:
            LOG.debug("Removing .node_cinder_lvm_config_complete_file")
            try:
                os.remove(constants.NODE_CINDER_LVM_CONFIG_COMPLETE_FILE)
            except OSError:
                LOG.error(".node_cinder_lvm_config_complete_file "
                          "not present.")
                pass

        # On SX ensure wipe succeeds before DB is updated.
        # Flag file is used to mark wiping in progress.
        try:
            os.remove(constants.DISK_WIPE_IN_PROGRESS_FLAG)
        except OSError:
            # it's ok if file is not present.
            pass

        # We need to send the updated info about the host disks back to
        # the conductor.
        idisk_update = self.idisk_get()
        ctxt = context.get_admin_context()
        rpcapi = conductor_rpcapi.ConductorAPI(
            topic=conductor_rpcapi.MANAGER_TOPIC)
        rpcapi.idisk_update_by_ihost(ctxt,
                                     host_uuid,
                                     idisk_update)

    def handle_exception(self, e):
        traceback = sys.exc_info()[-1]
        LOG.error("%s @ %s:%s" % (
            e, traceback.tb_frame.f_code.co_filename, traceback.tb_lineno))

    def is_rotational(self, device_name):
        """Find out if a certain disk is rotational or not. Mostly used for
           determining if disk is HDD or SSD.
        """
        # Obtain the path to the rotational file for the current device.
        device = device_name['DEVNAME'].split('/')[-1]
        rotational_path = "/sys/block/{device}/queue/rotational"\
            .format(device=device)

        rotational = None
        # Read file and remove trailing whitespaces.
        if os.path.isfile(rotational_path):
            with open(rotational_path, 'r') as rot_file:
                rotational = rot_file.read()
            rotational = rotational.rstrip()

        return rotational

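    # The sysfs file read above holds a single character (plus newline):
    #   /sys/block/sda/queue/rotational -> "1"  (rotational, i.e. HDD)
    #   /sys/block/sdb/queue/rotational -> "0"  (non-rotational, SSD/NVMe)
    # The stripped string, or None if the file is absent, is returned.
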
    def get_device_id_wwn(self, device):
        """Determine the ID and WWN of a disk from the value of the DEVLINKS
           attribute.

           Note: This data is not currently being used for anything. We are
           gathering this information so conductor can store for future use.
        """
        # The ID and WWN default to None.
        device_id = None
        device_wwn = None

        # If there is no DEVLINKS attribute, return None.
        if 'DEVLINKS' not in device:
            return device_id, device_wwn

        # Extract the ID and the WWN.
        LOG.debug("[DiskEnum] get_device_id_wwn: devlinks= %s" %
                  device['DEVLINKS'])
        devlinks = device['DEVLINKS'].split()
        for devlink in devlinks:
            if "by-id" in devlink:
                if "wwn" not in devlink:
                    device_id = devlink.split('/')[-1]
                    LOG.debug("[DiskEnum] by-id: %s id: %s" % (devlink,
                                                               device_id))
                else:
                    device_wwn = devlink.split('/')[-1]
                    LOG.debug("[DiskEnum] by-wwn: %s wwn: %s" % (devlink,
                                                                 device_wwn))

        return device_id, device_wwn

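    # Illustrative DEVLINKS value parsed above (hypothetical,
    # space-separated udev symlinks):
    #   /dev/disk/by-id/ata-ST1000DM003_Z1D4ABCD
    #   /dev/disk/by-id/wwn-0x5000c5006e1234d7 /dev/disk/by-path/pci-...
    # The first by-id link (no "wwn" substring) yields the device ID; the
    # wwn-prefixed by-id link yields the WWN; by-path links are ignored here.
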
    def idisk_get(self):
        """Enumerate disk topology based on:

        :param self
        :returns list of disk and attributes
        """
        idisk = []
        context = pyudev.Context()

        for device in context.list_devices(DEVTYPE='disk'):
            if not utils.is_system_usable_block_device(device):
                continue

            if device['MAJOR'] in constants.VALID_MAJOR_LIST:
                if 'ID_PATH' in device:
                    device_path = "/dev/disk/by-path/" + device['ID_PATH']
                    LOG.debug("[DiskEnum] device_path: %s ", device_path)
                else:
                    # We should always have a udev supplied /dev/disk/by-path
                    # value as a matter of normal operation. We do not expect
                    # this to occur, thus the error.
                    #
                    # The kickstart files for the host install require the
                    # by-path value also to be present or the host install
                    # will fail. Since the installer and the runtime share
                    # the same kernel/udev we should not see this message on
                    # an installed system.
                    device_path = None
                    LOG.error("Device %s does not have an ID_PATH value "
                              "provided by udev" % device.device_node)

                size_mib = 0
                available_mib = 0
                model_num = ''
                serial_id = ''

                # Can merge all try/except in one block but this allows
                # at least attributes with no exception to be filled
                try:
                    size_mib = utils.get_disk_capacity_mib(device.device_node)
                except Exception as e:
                    self.handle_exception("Could not retrieve disk size - %s "
                                          % e)

                try:
                    available_mib = self.get_disk_available_mib(
                        device_node=device.device_node)
                except Exception as e:
                    self.handle_exception(
                        "Could not retrieve disk %s free space" % e)

                try:
                    # ID_MODEL received from udev is not correct for disks
                    # that are used entirely for LVM. LVM replaces the model
                    # ID with its own identifier that starts with "LVM PV".
                    # For this reason we will attempt to retrieve the correct
                    # model ID by using 2 different commands: hdparm and
                    # lsblk. If one of them fails, the other one can attempt
                    # to retrieve the information. Else we use udev.

                    # try hdparm command first
                    hdparm_command = 'hdparm -I %s |grep Model' % (
                        device.get('DEVNAME'))
                    hdparm_process = subprocess.Popen(
                        hdparm_command,
                        stdout=subprocess.PIPE,
                        shell=True)
                    hdparm_output = hdparm_process.communicate()[0]
                    if hdparm_process.returncode == 0:
                        second_half = hdparm_output.split(':')[1]
                        model_num = second_half.strip()
                    else:
                        # try lsblk command
                        lsblk_command = 'lsblk -dn --output MODEL %s' % (
                            device.get('DEVNAME'))
                        lsblk_process = subprocess.Popen(
                            lsblk_command,
                            stdout=subprocess.PIPE,
                            shell=True)
                        lsblk_output = lsblk_process.communicate()[0]
                        if lsblk_process.returncode == 0:
                            model_num = lsblk_output.strip()
                        else:
                            # both hdparm and lsblk commands failed, try udev
                            model_num = device.get('ID_MODEL')
                    if not model_num:
                        model_num = constants.DEVICE_MODEL_UNKNOWN
                except Exception as e:
                    self.handle_exception("Could not retrieve disk model "
                                          "for disk %s. Exception: %s" %
                                          (device.get('DEVNAME'), e))
                try:
                    if 'ID_SCSI_SERIAL' in device:
                        serial_id = device['ID_SCSI_SERIAL']
                    else:
                        serial_id = device['ID_SERIAL_SHORT']
                except Exception as e:
                    self.handle_exception("Could not retrieve disk "
                                          "serial ID - %s " % e)

                capabilities = dict()
                if model_num:
                    capabilities.update({'model_num': model_num})

                if self.get_rootfs_node() == device.device_node:
                    capabilities.update({'stor_function': 'rootfs'})

                rotational = self.is_rotational(device)
                device_type = device.device_type

                rotation_rate = constants.DEVICE_TYPE_UNDETERMINED
                if rotational == '1':
                    device_type = constants.DEVICE_TYPE_HDD
                    if 'ID_ATA_ROTATION_RATE_RPM' in device:
                        rotation_rate = device['ID_ATA_ROTATION_RATE_RPM']
                elif rotational == '0':
                    if constants.DEVICE_NAME_NVME in device.device_node:
                        device_type = constants.DEVICE_TYPE_NVME
                    else:
                        device_type = constants.DEVICE_TYPE_SSD
                    rotation_rate = constants.DEVICE_TYPE_NA

                # TODO(sc) else: what are other possible stor_function value?
                # or do we just use pair { 'is_rootfs': True } instead?

                # Obtain device ID and WWN.
                device_id, device_wwn = self.get_device_id_wwn(device)

                attr = {
                    'device_node': device.device_node,
                    'device_num': device.device_number,
                    'device_type': device_type,
                    'device_path': device_path,
                    'device_id': device_id,
                    'device_wwn': device_wwn,
                    'size_mib': size_mib,
                    'available_mib': available_mib,
                    'serial_id': serial_id,
                    'capabilities': capabilities,
                    'rpm': rotation_rate,
                }

                idisk.append(attr)

        LOG.debug("idisk= %s" % idisk)

        return idisk
23 inventory/inventory/inventory/agent/lldp/config.py Normal file
@@ -0,0 +1,23 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# All Rights Reserved.
#

from oslo_config import cfg
from oslo_utils._i18n import _

INVENTORY_LLDP_OPTS = [
    cfg.ListOpt('drivers',
                default=['lldpd'],
                help=_("An ordered list of inventory LLDP driver "
                       "entrypoints to be loaded from the "
                       "inventory.agent namespace.")),
]

cfg.CONF.register_opts(INVENTORY_LLDP_OPTS, group="lldp")
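For reference, a minimal sketch of overriding the [lldp] drivers option registered above, assuming the OVS driver below is published under the entry point name 'ovs' (the programmatic override is illustrative only):

    # Sketch: select the OVS driver instead of the default lldpd driver.
    from oslo_config import cfg
    import inventory.agent.lldp.config  # noqa: registers the [lldp] group
    cfg.CONF.set_override('drivers', ['ovs'], group='lldp')
    assert cfg.CONF.lldp.drivers == ['ovs']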
47 inventory/inventory/inventory/agent/lldp/drivers/base.py Normal file
@@ -0,0 +1,47 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# All Rights Reserved.
#

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class InventoryLldpDriverBase(object):
    """Inventory LLDP Driver Base Class."""

    @abc.abstractmethod
    def lldp_has_neighbour(self, name):
        pass

    @abc.abstractmethod
    def lldp_update(self):
        pass

    @abc.abstractmethod
    def lldp_agents_list(self):
        pass

    @abc.abstractmethod
    def lldp_neighbours_list(self):
        pass

    @abc.abstractmethod
    def lldp_agents_clear(self):
        pass

    @abc.abstractmethod
    def lldp_neighbours_clear(self):
        pass

    @abc.abstractmethod
    def lldp_update_systemname(self, systemname):
        pass
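Any concrete driver must implement every abstract method above; a minimal no-op sketch (illustrative only, not part of this change):

    class NoopLldpDriver(InventoryLldpDriverBase):
        """Minimal driver satisfying the abstract interface."""

        def lldp_has_neighbour(self, name):
            return False

        def lldp_update(self):
            pass

        def lldp_agents_list(self):
            return []

        def lldp_neighbours_list(self):
            return []

        def lldp_agents_clear(self):
            pass

        def lldp_neighbours_clear(self):
            pass

        def lldp_update_systemname(self, systemname):
            pass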
321 inventory/inventory/inventory/agent/lldp/drivers/lldpd/driver.py Normal file
@@ -0,0 +1,321 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# All Rights Reserved.
#

from oslo_log import log as logging

import simplejson as json
import subprocess

from inventory.agent.lldp.drivers import base
from inventory.agent.lldp import plugin
from inventory.common import k_lldp

LOG = logging.getLogger(__name__)


class InventoryLldpdAgentDriver(base.InventoryLldpDriverBase):

    def __init__(self, **kwargs):
        self.client = ""
        self.agents = []
        self.neighbours = []
        self.current_neighbours = []
        self.previous_neighbours = []
        self.current_agents = []
        self.previous_agents = []
        self.agent_audit_count = 0
        self.neighbour_audit_count = 0

    def initialize(self):
        self.__init__()

    @staticmethod
    def _lldpd_get_agent_status():
        json_obj = json
        p = subprocess.Popen(["lldpcli", "-f", "json", "show",
                              "configuration"],
                             stdout=subprocess.PIPE)
        data = json_obj.loads(p.communicate()[0])

        configuration = data['configuration'][0]
        config = configuration['config'][0]
        rx_only = config['rx-only'][0]

        if rx_only.get("value") == "no":
            return "rx=enabled,tx=enabled"
        else:
            return "rx=enabled,tx=disabled"

    @staticmethod
    def _lldpd_get_attrs(iface):
        name_or_uuid = None
        chassis_id = None
        system_name = None
        system_desc = None
        capability = None
        management_address = None
        port_desc = None
        dot1_lag = None
        dot1_port_vid = None
        dot1_vid_digest = None
        dot1_mgmt_vid = None
        dot1_vlan_names = None
        dot1_proto_vids = None
        dot1_proto_ids = None
        dot3_mac_status = None
        dot3_max_frame = None
        dot3_power_mdi = None
        ttl = None
        attrs = {}

        # Note: dot1_vid_digest, dot1_mgmt_vid are not currently supported
        # by the lldpd daemon

        name_or_uuid = iface.get("name")
        chassis = iface.get("chassis")[0]
        port = iface.get("port")[0]

        if not chassis.get('id'):
            return attrs
        chassis_id = chassis['id'][0].get("value")

        if not port.get('id'):
            return attrs
        port_id = port["id"][0].get("value")

        if not port.get('ttl'):
            return attrs
        ttl = port['ttl'][0].get("value")

        if chassis.get("name"):
            system_name = chassis['name'][0].get("value")

        if chassis.get("descr"):
            system_desc = chassis['descr'][0].get("value")

        if chassis.get("capability"):
            capability = ""
            for cap in chassis["capability"]:
                if cap.get("enabled"):
                    if capability:
                        capability += ", "
                    capability += cap.get("type").lower()

        if chassis.get("mgmt-ip"):
            management_address = ""
            for addr in chassis["mgmt-ip"]:
                if management_address:
                    management_address += ", "
                management_address += addr.get("value").lower()

        if port.get("descr"):
            port_desc = port["descr"][0].get("value")

        if port.get("link-aggregation"):
            dot1_lag_supported = port["link-aggregation"][0].get("supported")
            dot1_lag_enabled = port["link-aggregation"][0].get("enabled")
            dot1_lag = "capable="
            if dot1_lag_supported:
                dot1_lag += "y,"
            else:
                dot1_lag += "n,"
            dot1_lag += "enabled="
            if dot1_lag_enabled:
                dot1_lag += "y"
            else:
                dot1_lag += "n"

        if port.get("auto-negotiation"):
            port_auto_neg_support = port["auto-negotiation"][0].get(
                "supported")
            port_auto_neg_enabled = port["auto-negotiation"][0].get("enabled")
            dot3_mac_status = "auto-negotiation-capable="
            if port_auto_neg_support:
                dot3_mac_status += "y,"
            else:
                dot3_mac_status += "n,"
            dot3_mac_status += "auto-negotiation-enabled="
            if port_auto_neg_enabled:
                dot3_mac_status += "y,"
            else:
                dot3_mac_status += "n,"
            advertised = ""
            if port.get("auto-negotiation")[0].get("advertised"):
                for adv in port["auto-negotiation"][0].get("advertised"):
                    if advertised:
                        advertised += ", "
                    type = adv.get("type").lower()
                    if adv.get("hd") and not adv.get("fd"):
                        type += "hd"
                    elif adv.get("fd"):
                        type += "fd"
                    advertised += type
            dot3_mac_status += advertised

        if port.get("mfs"):
            dot3_max_frame = port["mfs"][0].get("value")

        if port.get("power"):
            power_mdi_support = port["power"][0].get("supported")
            power_mdi_enabled = port["power"][0].get("enabled")
            power_mdi_devicetype = port["power"][0].get("device-type")[0].get(
                "value")
            power_mdi_pairs = port["power"][0].get("pairs")[0].get("value")
            power_mdi_class = port["power"][0].get("class")[0].get("value")
            dot3_power_mdi = "power-mdi-supported="
            if power_mdi_support:
                dot3_power_mdi += "y,"
            else:
                dot3_power_mdi += "n,"
            dot3_power_mdi += "power-mdi-enabled="
            if power_mdi_enabled:
                dot3_power_mdi += "y,"
            else:
                dot3_power_mdi += "n,"
            if power_mdi_support and power_mdi_enabled:
                dot3_power_mdi += "device-type=" + power_mdi_devicetype
                dot3_power_mdi += ",pairs=" + power_mdi_pairs
                dot3_power_mdi += ",class=" + power_mdi_class

        vlans = None
        if iface.get("vlan"):
            vlans = iface.get("vlan")

        if vlans:
            dot1_vlan_names = ""
            for vlan in vlans:
                if vlan.get("pvid"):
                    dot1_port_vid = vlan.get("vlan-id")
                    continue
                if dot1_vlan_names:
                    dot1_vlan_names += ", "
                dot1_vlan_names += vlan.get("value")

        ppvids = None
        if iface.get("ppvids"):
            ppvids = iface.get("ppvid")

        if ppvids:
            dot1_proto_vids = ""
            for ppvid in ppvids:
                if dot1_proto_vids:
                    dot1_proto_vids += ", "
                dot1_proto_vids += ppvid.get("value")

        pids = None
        if iface.get("pi"):
            pids = iface.get('pi')
            dot1_proto_ids = ""
            for id in pids:
                if dot1_proto_ids:
                    dot1_proto_ids += ", "
                dot1_proto_ids += id.get("value")

        msap = chassis_id + "," + port_id

        attrs = {"name_or_uuid": name_or_uuid,
                 k_lldp.LLDP_TLV_TYPE_CHASSIS_ID: chassis_id,
                 k_lldp.LLDP_TLV_TYPE_PORT_ID: port_id,
                 k_lldp.LLDP_TLV_TYPE_TTL: ttl,
                 "msap": msap,
                 k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME: system_name,
                 k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC: system_desc,
                 k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP: capability,
                 k_lldp.LLDP_TLV_TYPE_MGMT_ADDR: management_address,
                 k_lldp.LLDP_TLV_TYPE_PORT_DESC: port_desc,
                 k_lldp.LLDP_TLV_TYPE_DOT1_LAG: dot1_lag,
                 k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID: dot1_port_vid,
                 k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST: dot1_vid_digest,
                 k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID: dot1_mgmt_vid,
                 k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES: dot1_vlan_names,
                 k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS: dot1_proto_vids,
                 k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS: dot1_proto_ids,
                 k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS: dot3_mac_status,
                 k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME: dot3_max_frame,
                 k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI: dot3_power_mdi}

        return attrs

    def lldp_has_neighbour(self, name):
        p = subprocess.check_output(["lldpcli", "-f", "keyvalue", "show",
                                     "neighbors", "summary", "ports", name])
        return len(p) > 0

    def lldp_update(self):
        subprocess.call(['lldpcli', 'update'])

    def lldp_agents_list(self):
        json_obj = json
        lldp_agents = []

        p = subprocess.Popen(["lldpcli", "-f", "json", "show", "interface",
                              "detail"], stdout=subprocess.PIPE)
        data = json_obj.loads(p.communicate()[0])

        lldp = data['lldp'][0]

        if not lldp.get('interface'):
            return lldp_agents

        for iface in lldp['interface']:
            agent_attrs = self._lldpd_get_attrs(iface)
            status = self._lldpd_get_agent_status()
            agent_attrs.update({"status": status})
            agent = plugin.Agent(**agent_attrs)
            lldp_agents.append(agent)

        return lldp_agents

    def lldp_agents_clear(self):
        self.current_agents = []
        self.previous_agents = []

    def lldp_neighbours_list(self):
        json_obj = json
        lldp_neighbours = []
        p = subprocess.Popen(["lldpcli", "-f", "json", "show", "neighbor",
                              "detail"], stdout=subprocess.PIPE)
        data = json_obj.loads(p.communicate()[0])

        lldp = data['lldp'][0]

        if not lldp.get('interface'):
            return lldp_neighbours

        for iface in lldp['interface']:
            neighbour_attrs = self._lldpd_get_attrs(iface)
            neighbour = plugin.Neighbour(**neighbour_attrs)
            lldp_neighbours.append(neighbour)

        return lldp_neighbours

    def lldp_neighbours_clear(self):
        self.current_neighbours = []
        self.previous_neighbours = []

    def lldp_update_systemname(self, systemname):
        p = subprocess.Popen(["lldpcli", "-f", "json", "show", "chassis"],
                             stdout=subprocess.PIPE)
        data = json.loads(p.communicate()[0])

        local_chassis = data['local-chassis'][0]
        chassis = local_chassis['chassis'][0]
        name = chassis.get('name', None)
        if name is None or not name[0].get("value"):
            return
        name = name[0]

        hostname = name.get("value").partition(':')[0]

        newname = hostname + ":" + systemname

        p = subprocess.Popen(["lldpcli", "configure", "system", "hostname",
                              newname], stdout=subprocess.PIPE)
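_lldpd_get_attrs above walks the JSON emitted by lldpcli, in which each attribute is a list of single-entry dicts. An abbreviated, hypothetical payload for orientation:

    # lldpcli -f json show neighbor detail (abridged, illustrative values):
    # {"lldp": [{"interface": [{
    #     "name": "ens801f0",
    #     "chassis": [{"id":   [{"type": "mac",
    #                            "value": "08:00:27:aa:bb:cc"}],
    #                  "name": [{"value": "switch-1"}]}],
    #     "port":    [{"id":   [{"value": "Ethernet1/7"}],
    #                  "ttl":  [{"value": "120"}]}]}]}]}
    # which yields chassis_id "08:00:27:aa:bb:cc", port_id "Ethernet1/7",
    # ttl "120" and msap "08:00:27:aa:bb:cc,Ethernet1/7".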
167 inventory/inventory/inventory/agent/lldp/drivers/ovs/driver.py Normal file
@@ -0,0 +1,167 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# All Rights Reserved.
#

import simplejson as json
import subprocess

from oslo_log import log as logging

from inventory.agent.lldp.drivers.lldpd import driver as lldpd_driver
from inventory.common import k_lldp

LOG = logging.getLogger(__name__)


class InventoryOVSAgentDriver(lldpd_driver.InventoryLldpdAgentDriver):

    def run_cmd(self, cmd):
        p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        p.wait()
        output, error = p.communicate()
        if p.returncode != 0:
            LOG.error("Failed to run command %s: error: %s", cmd, error)
            return None
        return output

    def lldp_ovs_get_interface_port_map(self):
        interface_port_map = {}

        cmd = "ovs-vsctl --timeout 10 --format json "\
              "--columns name,_uuid,interfaces list Port"

        output = self.run_cmd(cmd)
        if not output:
            return

        ports = json.loads(output)
        ports = ports['data']

        for port in ports:
            port_uuid = port[1][1]
            interfaces = port[2][1]

            if isinstance(interfaces, list):
                for interface in interfaces:
                    interface_uuid = interface[1]
                    interface_port_map[interface_uuid] = port_uuid
            else:
                interface_uuid = interfaces
                interface_port_map[interface_uuid] = port_uuid

        return interface_port_map

    def lldp_ovs_get_port_bridge_map(self):
        port_bridge_map = {}

        cmd = "ovs-vsctl --timeout 10 --format json "\
              "--columns name,ports list Bridge"
        output = self.run_cmd(cmd)
        if not output:
            return

        bridges = json.loads(output)
        bridges = bridges['data']

        for bridge in bridges:
            bridge_name = bridge[0]
            port_set = bridge[1][1]
            for port in port_set:
                value = port[1]
                port_bridge_map[value] = bridge_name

        return port_bridge_map

    def lldp_ovs_lldp_flow_exists(self, brname, in_port):

        cmd = "ovs-ofctl dump-flows {} in_port={},dl_dst={},dl_type={}".format(
            brname, in_port, k_lldp.LLDP_MULTICAST_ADDRESS,
            k_lldp.LLDP_ETHER_TYPE)
        output = self.run_cmd(cmd)
        if not output:
            return None

        return (output.count("\n") > 1)

    def lldp_ovs_add_flows(self, brname, in_port, out_port):

        cmd = ("ovs-ofctl add-flow {} in_port={},dl_dst={},dl_type={},"
               "actions=output:{}".format(
                   brname, in_port, k_lldp.LLDP_MULTICAST_ADDRESS,
                   k_lldp.LLDP_ETHER_TYPE, out_port))
        output = self.run_cmd(cmd)
        if not output:
            return

        cmd = ("ovs-ofctl add-flow {} in_port={},dl_dst={},dl_type={},"
               "actions=output:{}".format(
                   brname, out_port, k_lldp.LLDP_MULTICAST_ADDRESS,
                   k_lldp.LLDP_ETHER_TYPE, in_port))
        output = self.run_cmd(cmd)
        if not output:
            return

    def lldp_ovs_update_flows(self):

        port_bridge_map = self.lldp_ovs_get_port_bridge_map()
        if not port_bridge_map:
            return

        interface_port_map = self.lldp_ovs_get_interface_port_map()
        if not interface_port_map:
            return

        cmd = "ovs-vsctl --timeout 10 --format json "\
              "--columns name,_uuid,type,other_config list Interface"

        output = self.run_cmd(cmd)
        if not output:
            return

        data = json.loads(output)
        data = data['data']

        for interface in data:
            name = interface[0]
            uuid = interface[1][1]
            type = interface[2]
            other_config = interface[3]

            if type != 'internal':
                continue

            config_map = other_config[1]
            for config in config_map:
                key = config[0]
                value = config[1]
                if key != 'lldp_phy_peer':
                    continue

                phy_peer = value
                brname = port_bridge_map[interface_port_map[uuid]]
                if not self.lldp_ovs_lldp_flow_exists(brname, name):
                    LOG.info("Adding missing LLDP flow from %s to %s",
                             name, phy_peer)
                    self.lldp_ovs_add_flows(brname, name, phy_peer)

                if not self.lldp_ovs_lldp_flow_exists(brname, value):
                    LOG.info("Adding missing LLDP flow from %s to %s",
                             phy_peer, name)
                    self.lldp_ovs_add_flows(brname, phy_peer, name)

    def lldp_agents_list(self):
        self.lldp_ovs_update_flows()
        return lldpd_driver.InventoryLldpdAgentDriver.lldp_agents_list(self)

    def lldp_neighbours_list(self):
        self.lldp_ovs_update_flows()
        return lldpd_driver.InventoryLldpdAgentDriver.lldp_neighbours_list(
            self)
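The `ovs-vsctl --format json` tables parsed above encode values as tagged pairs, which is why the code indexes `port[1][1]` and friends. A sketch of the shape (hypothetical names and UUIDs):

    # ovs-vsctl --timeout 10 --format json --columns name,_uuid,interfaces
    #     list Port   (abridged):
    # {"data": [["port0",
    #            ["uuid", "1c8c8f0b-..."],
    #            ["set", [["uuid", "9a3c71e4-..."],
    #                     ["uuid", "d07e1a22-..."]]]]],
    #  "headings": ["name", "_uuid", "interfaces"]}
    # port[1][1] is the Port row UUID; port[2][1] is either a list of
    # ["uuid", ...] pairs or a single UUID string, hence the isinstance
    # check in lldp_ovs_get_interface_port_map.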
176 inventory/inventory/inventory/agent/lldp/manager.py Normal file
@@ -0,0 +1,176 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# All Rights Reserved.
#

from inventory.common import exception
from oslo_config import cfg
from oslo_log import log
from stevedore.named import NamedExtensionManager

LOG = log.getLogger(__name__)
cfg.CONF.import_opt('drivers',
                    'inventory.agent.lldp.config',
                    group='lldp')


class InventoryLldpDriverManager(NamedExtensionManager):
    """Implementation of Inventory LLDP drivers."""

    def __init__(self, namespace='inventory.agent.lldp.drivers'):

        # Registered inventory lldp agent drivers, keyed by name.
        self.drivers = {}

        # Ordered list of inventory lldp agent drivers, defining
        # the order in which the drivers are called.
        self.ordered_drivers = []

        names = cfg.CONF.lldp.drivers
        LOG.info("Configured inventory LLDP agent drivers: %s", names)

        super(InventoryLldpDriverManager, self).__init__(
            namespace,
            names,
            invoke_on_load=True,
            name_order=True)

        LOG.info("Loaded inventory LLDP agent drivers: %s", self.names())
        self._register_drivers()

    def _register_drivers(self):
        """Register all inventory LLDP agent drivers.

        This method should only be called once in the
        InventoryLldpDriverManager constructor.
        """
        for ext in self:
            self.drivers[ext.name] = ext
            self.ordered_drivers.append(ext)
        LOG.info("Registered inventory LLDP agent drivers: %s",
                 [driver.name for driver in self.ordered_drivers])

    def _call_drivers_and_return_array(self, method_name, attr=None,
                                       raise_orig_exc=False):
        """Helper method for calling a method across all drivers.

        :param method_name: name of the method to call
        :param attr: an optional attribute to provide to the drivers
        :param raise_orig_exc: whether or not to raise the original
                               driver exception, or use a general one
        """
        ret = []
        for driver in self.ordered_drivers:
            try:
                method = getattr(driver.obj, method_name)
                if attr:
                    ret = ret + method(attr)
                else:
                    ret = ret + method()
            except Exception as e:
                LOG.exception(e)
                LOG.error(
                    "Inventory LLDP agent driver '%(name)s' "
                    "failed in %(method)s",
                    {'name': driver.name, 'method': method_name}
                )
                if raise_orig_exc:
                    raise
                else:
                    raise exception.LLDPDriverError(
                        method=method_name
                    )
        return list(set(ret))

    def _call_drivers(self, method_name, attr=None, raise_orig_exc=False):
        """Helper method for calling a method across all drivers.

        :param method_name: name of the method to call
        :param attr: an optional attribute to provide to the drivers
        :param raise_orig_exc: whether or not to raise the original
                               driver exception, or use a general one
        """
        for driver in self.ordered_drivers:
            try:
                method = getattr(driver.obj, method_name)
                if attr:
                    method(attr)
                else:
                    method()
            except Exception as e:
                LOG.exception(e)
                LOG.error(
                    "Inventory LLDP agent driver '%(name)s' "
                    "failed in %(method)s",
                    {'name': driver.name, 'method': method_name}
                )
                if raise_orig_exc:
                    raise
                else:
                    raise exception.LLDPDriverError(
                        method=method_name
                    )

    def lldp_has_neighbour(self, name):
        try:
            return self._call_drivers("lldp_has_neighbour",
                                      attr=name,
                                      raise_orig_exc=True)
        except Exception as e:
            LOG.exception(e)
            return []

    def lldp_update(self):
        try:
            return self._call_drivers("lldp_update",
                                      raise_orig_exc=True)
        except Exception as e:
            LOG.exception(e)
            return []

    def lldp_agents_list(self):
        try:
            return self._call_drivers_and_return_array("lldp_agents_list",
                                                       raise_orig_exc=True)
        except Exception as e:
            LOG.exception(e)
            return []

    def lldp_neighbours_list(self):
        try:
            return self._call_drivers_and_return_array("lldp_neighbours_list",
                                                       raise_orig_exc=True)
        except Exception as e:
            LOG.exception(e)
            return []

    def lldp_agents_clear(self):
        try:
            return self._call_drivers("lldp_agents_clear",
                                      raise_orig_exc=True)
        except Exception as e:
            LOG.exception(e)
            return

    def lldp_neighbours_clear(self):
        try:
            return self._call_drivers("lldp_neighbours_clear",
                                      raise_orig_exc=True)
        except Exception as e:
            LOG.exception(e)
            return

    def lldp_update_systemname(self, systemname):
        try:
            return self._call_drivers("lldp_update_systemname",
                                      attr=systemname,
                                      raise_orig_exc=True)
        except Exception as e:
            LOG.exception(e)
            return
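A minimal usage sketch of the manager (assumes the stevedore entry points for the configured drivers are installed in the environment):

    # Sketch: enumerate LLDP neighbours through whichever drivers loaded.
    from inventory.agent.lldp.manager import InventoryLldpDriverManager

    lldp_manager = InventoryLldpDriverManager()
    for neighbour in lldp_manager.lldp_neighbours_list():
        print(neighbour)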
246 inventory/inventory/inventory/agent/lldp/plugin.py Normal file
@@ -0,0 +1,246 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# All Rights Reserved.
#

from oslo_log import log
from oslo_utils import excutils

from inventory.agent.lldp import manager
from inventory.common import exception
from inventory.common import k_lldp
from inventory.common.utils import compare as cmp

LOG = log.getLogger(__name__)


class Key(object):
    def __init__(self, chassisid, portid, portname):
        self.chassisid = chassisid
        self.portid = portid
        self.portname = portname

    def __hash__(self):
        return hash((self.chassisid, self.portid, self.portname))

    def __cmp__(self, rhs):
        return (cmp(self.chassisid, rhs.chassisid) or
                cmp(self.portid, rhs.portid) or
                cmp(self.portname, rhs.portname))

    def __eq__(self, rhs):
        return (self.chassisid == rhs.chassisid and
                self.portid == rhs.portid and
                self.portname == rhs.portname)

    def __ne__(self, rhs):
        return (self.chassisid != rhs.chassisid or
                self.portid != rhs.portid or
                self.portname != rhs.portname)

    def __str__(self):
        return "%s [%s] [%s]" % (self.portname, self.chassisid, self.portid)

    def __repr__(self):
        return "<Key '%s'>" % str(self)


class Agent(object):
    '''Class to encapsulate LLDP agent data for System Inventory'''

    def __init__(self, **kwargs):
        '''Construct an Agent object with the given values.'''
        self.key = Key(kwargs.get(k_lldp.LLDP_TLV_TYPE_CHASSIS_ID),
                       kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_ID),
                       kwargs.get("name_or_uuid"))
        self.status = kwargs.get('status')
        self.ttl = kwargs.get(k_lldp.LLDP_TLV_TYPE_TTL)
        self.system_name = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME)
        self.system_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC)
        self.port_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_DESC)
        self.capabilities = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP)
        self.mgmt_addr = kwargs.get(k_lldp.LLDP_TLV_TYPE_MGMT_ADDR)
        self.dot1_lag = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_LAG)
        self.dot1_vlan_names = kwargs.get(
            k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES)
        self.dot3_max_frame = kwargs.get(
            k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME)
        self.state = None

    def __hash__(self):
        return self.key.__hash__()

    def __eq__(self, rhs):
        return (self.key == rhs.key)

    def __ne__(self, rhs):
        return (self.key != rhs.key or
                self.status != rhs.status or
                self.ttl != rhs.ttl or
                self.system_name != rhs.system_name or
                self.system_desc != rhs.system_desc or
                self.port_desc != rhs.port_desc or
                self.capabilities != rhs.capabilities or
                self.mgmt_addr != rhs.mgmt_addr or
                self.dot1_lag != rhs.dot1_lag or
                self.dot1_vlan_names != rhs.dot1_vlan_names or
                self.dot3_max_frame != rhs.dot3_max_frame or
                self.state != rhs.state)

    def __str__(self):
        return "%s: [%s] [%s] [%s], [%s], [%s], [%s], [%s], [%s]" % (
            self.key, self.status, self.system_name, self.system_desc,
            self.port_desc, self.capabilities,
            self.mgmt_addr, self.dot1_lag,
            self.dot3_max_frame)

    def __repr__(self):
        return "<Agent '%s'>" % str(self)


class Neighbour(object):
    '''Class to encapsulate LLDP neighbour data for System Inventory'''

    def __init__(self, **kwargs):
        '''Construct a Neighbour object with the given values.'''
        self.key = Key(kwargs.get(k_lldp.LLDP_TLV_TYPE_CHASSIS_ID),
                       kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_ID),
                       kwargs.get("name_or_uuid"))
        self.msap = kwargs.get('msap')
        self.ttl = kwargs.get(k_lldp.LLDP_TLV_TYPE_TTL)
        self.system_name = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME)
        self.system_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC)
        self.port_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_DESC)
        self.capabilities = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP)
        self.mgmt_addr = kwargs.get(k_lldp.LLDP_TLV_TYPE_MGMT_ADDR)
        self.dot1_port_vid = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID)
        self.dot1_vid_digest = kwargs.get(
            k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST)
        self.dot1_mgmt_vid = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID)
        self.dot1_lag = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_LAG)
        self.dot1_vlan_names = kwargs.get(
            k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES)
        self.dot1_proto_vids = kwargs.get(
            k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS)
        self.dot1_proto_ids = kwargs.get(
            k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS)
        self.dot3_mac_status = kwargs.get(
            k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS)
        self.dot3_max_frame = kwargs.get(
            k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME)
        self.dot3_power_mdi = kwargs.get(
            k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI)

        self.state = None

    def __hash__(self):
        return self.key.__hash__()

    def __eq__(self, rhs):
        return (self.key == rhs.key)

    def __ne__(self, rhs):
        return (self.key != rhs.key or
                self.msap != rhs.msap or
                self.system_name != rhs.system_name or
                self.system_desc != rhs.system_desc or
                self.port_desc != rhs.port_desc or
                self.capabilities != rhs.capabilities or
                self.mgmt_addr != rhs.mgmt_addr or
                self.dot1_port_vid != rhs.dot1_port_vid or
                self.dot1_vid_digest != rhs.dot1_vid_digest or
                self.dot1_mgmt_vid != rhs.dot1_mgmt_vid or
                self.dot1_lag != rhs.dot1_lag or
                self.dot1_vlan_names != rhs.dot1_vlan_names or
                self.dot1_proto_vids != rhs.dot1_proto_vids or
                self.dot1_proto_ids != rhs.dot1_proto_ids or
                self.dot3_mac_status != rhs.dot3_mac_status or
                self.dot3_max_frame != rhs.dot3_max_frame or
                self.dot3_power_mdi != rhs.dot3_power_mdi)

    def __str__(self):
        return "%s [%s] [%s] [%s], [%s]" % (
            self.key, self.system_name, self.system_desc,
            self.port_desc, self.capabilities)

    def __repr__(self):
        return "<Neighbour '%s'>" % str(self)


class InventoryLldpPlugin(object):

    """Implementation of the Plugin."""

    def __init__(self):
        self.manager = manager.InventoryLldpDriverManager()

    def lldp_has_neighbour(self, name):
        try:
            return self.manager.lldp_has_neighbour(name)
        except exception.LLDPDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error("LLDP has neighbour failed")

    def lldp_update(self):
        try:
            self.manager.lldp_update()
        except exception.LLDPDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error("LLDP update failed")

    def lldp_agents_list(self):
        try:
            agents = self.manager.lldp_agents_list()
        except exception.LLDPDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error("LLDP agents list failed")

        return agents

    def lldp_agents_clear(self):
        try:
            self.manager.lldp_agents_clear()
        except exception.LLDPDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error("LLDP agents clear failed")

    def lldp_neighbours_list(self):
        try:
            neighbours = self.manager.lldp_neighbours_list()
        except exception.LLDPDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error("LLDP neighbours list failed")

        return neighbours

    def lldp_neighbours_clear(self):
        try:
            self.manager.lldp_neighbours_clear()
        except exception.LLDPDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error("LLDP neighbours clear failed")

    def lldp_update_systemname(self, systemname):
        try:
            self.manager.lldp_update_systemname(systemname)
        except exception.LLDPDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error("LLDP update systemname failed")
973 inventory/inventory/inventory/agent/manager.py Normal file
@@ -0,0 +1,973 @@
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


""" Perform activity related to local inventory.

A single instance of :py:class:`inventory.agent.manager.AgentManager` is
created within the *inventory-agent* process, and is responsible for
performing all actions for this host managed by inventory.

On start, collect and post inventory.

Commands (from conductors) are received via RPC calls.

"""

import errno
import fcntl
import os
import oslo_messaging as messaging
import socket
import subprocess
import time

from futurist import periodics
from oslo_config import cfg
from oslo_log import log

# from inventory.agent import partition
from inventory.agent import base_manager
from inventory.agent.lldp import plugin as lldp_plugin
from inventory.agent import node
from inventory.agent import pci
from inventory.common import constants
from inventory.common import context as mycontext
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import k_host
from inventory.common import k_lldp
from inventory.common import utils
from inventory.conductor import rpcapi as conductor_rpcapi
import tsconfig.tsconfig as tsc

MANAGER_TOPIC = 'inventory.agent_manager'

LOG = log.getLogger(__name__)

agent_opts = [
    cfg.StrOpt('api_url',
               default=None,
               help=('Url of Inventory API service. If not set Inventory '
                     'can get current value from Keystone service '
                     'catalog.')),
    cfg.IntOpt('audit_interval',
               default=60,
               help='Maximum time since the last check-in of an agent'),
]

CONF = cfg.CONF
CONF.register_opts(agent_opts, 'agent')

MAXSLEEP = 300  # 5 minutes

INVENTORY_READY_FLAG = os.path.join(tsc.VOLATILE_PATH, ".inventory_ready")


FIRST_BOOT_FLAG = os.path.join(
    tsc.PLATFORM_CONF_PATH, ".first_boot")

class AgentManager(base_manager.BaseAgentManager):
    """Inventory Agent service main class."""

    # Must be in sync with rpcapi.AgentAPI's
    RPC_API_VERSION = '1.0'

    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, host, topic):
        super(AgentManager, self).__init__(host, topic)

        self._report_to_conductor = False
        self._report_to_conductor_iplatform_avail_flag = False
        self._ipci_operator = pci.PCIOperator()
        self._inode_operator = node.NodeOperator()
        self._lldp_operator = lldp_plugin.InventoryLldpPlugin()
        self._ihost_personality = None
        self._ihost_uuid = ""
        self._agent_throttle = 0
        self._subfunctions = None
        self._subfunctions_configured = False
        self._notify_subfunctions_alarm_clear = False
        self._notify_subfunctions_alarm_raise = False
        self._first_grub_update = False

    @property
    def report_to_conductor_required(self):
        return self._report_to_conductor

    @report_to_conductor_required.setter
    def report_to_conductor_required(self, val):
        if not isinstance(val, bool):
            raise ValueError("report_to_conductor_required not bool %s" %
                             val)
        self._report_to_conductor = val

    def start(self):
        # Do not collect inventory and report to conductor at startup in
        # order to eliminate two inventory reports
        # (one from here and one from audit) being sent to the conductor

        super(AgentManager, self).start()

        if os.path.isfile('/etc/inventory/inventory.conf'):
            LOG.info("inventory-agent started, "
                     "inventory to be reported by audit")
        else:
            LOG.info("No config file for inventory-agent found.")

        if tsc.system_mode == constants.SYSTEM_MODE_SIMPLEX:
            utils.touch(INVENTORY_READY_FLAG)

    def init_host(self, admin_context=None):
        super(AgentManager, self).init_host(admin_context)
        if os.path.isfile('/etc/inventory/inventory.conf'):
            LOG.info(_("inventory-agent started, "
                       "system config to be reported by audit"))
        else:
            LOG.info(_("No config file for inventory-agent found."))

        if tsc.system_mode == constants.SYSTEM_MODE_SIMPLEX:
            utils.touch(INVENTORY_READY_FLAG)

    def del_host(self, deregister=True):
        return

    def periodic_tasks(self, context, raise_on_error=False):
        """Periodic tasks are run at pre-specified intervals. """
        return self.run_periodic_tasks(context,
                                       raise_on_error=raise_on_error)

    def _report_to_conductor_iplatform_avail(self):
        utils.touch(INVENTORY_READY_FLAG)
        time.sleep(1)  # give time for conductor to process
        self._report_to_conductor_iplatform_avail_flag = True

    def _update_ttys_dcd_status(self, context, host_id):
        # Retrieve the serial line carrier detect flag
        ttys_dcd = None
        rpcapi = conductor_rpcapi.ConductorAPI(
            topic=conductor_rpcapi.MANAGER_TOPIC)
        try:
            ttys_dcd = rpcapi.get_host_ttys_dcd(context, host_id)
        except exception.InventoryException:
            LOG.exception("Inventory Agent exception getting host ttys_dcd.")
            pass
        if ttys_dcd is not None:
            self._config_ttys_login(ttys_dcd)
        else:
            LOG.debug("ttys_dcd is not configured")

    @staticmethod
    def _get_active_device():
        # the list of currently configured console devices,
        # like 'tty1 ttyS0' or just 'ttyS0'
        # The last entry in the file is the active device connected
        # to /dev/console.
        active_device = 'ttyS0'
        try:
            cmd = 'cat /sys/class/tty/console/active | grep ttyS'
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
            output = proc.stdout.read().strip()
            proc.communicate()[0]
            if proc.returncode != 0:
                LOG.info("Cannot find the current configured serial device, "
                         "return default %s" % active_device)
                return active_device
            # if more than one device is found, take the last entry
            if ' ' in output:
                devs = output.split(' ')
                active_device = devs[len(devs) - 1]
            else:
                active_device = output
        except subprocess.CalledProcessError as e:
            LOG.error("Failed to execute (%s) (%d)", cmd, e.returncode)
        except OSError as e:
            LOG.error("Failed to execute (%s) OS error (%d)", cmd, e.errno)

        return active_device

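    # Illustrative content read above (hypothetical):
    #   $ cat /sys/class/tty/console/active
    #   tty0 ttyS0
    # grep keeps the output only when a ttyS* entry exists; with multiple
    # devices listed, the last one ("ttyS0") is taken as the active console.
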
@staticmethod
|
||||||
|
def _is_local_flag_disabled(device):
|
||||||
|
"""
|
||||||
|
:param device:
|
||||||
|
:return: boolean: True if the local flag is disabled 'i.e. -clocal is
|
||||||
|
set'. This means the serial data carrier detect
|
||||||
|
signal is significant
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# uses -o for only-matching and -e for a pattern beginning with a
|
||||||
|
# hyphen (-), the following command returns 0 if the local flag
|
||||||
|
# is disabled
|
||||||
|
cmd = 'stty -a -F /dev/%s | grep -o -e -clocal' % device
|
||||||
|
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
|
||||||
|
proc.communicate()[0]
|
||||||
|
return proc.returncode == 0
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
LOG.error("Failed to execute (%s) (%d)", cmd, e.returncode)
|
||||||
|
return False
|
||||||
|
except OSError as e:
|
||||||
|
LOG.error("Failed to execute (%s) OS error (%d)", cmd, e.errno)
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _config_ttys_login(self, ttys_dcd):
|
||||||
|
# agetty is now enabled by systemd
|
||||||
|
# we only need to disable the local flag to enable carrier detection
|
||||||
|
# and enable the local flag when the feature is turned off
|
||||||
|
toggle_flag = None
|
||||||
|
active_device = self._get_active_device()
|
||||||
|
local_flag_disabled = self._is_local_flag_disabled(active_device)
|
||||||
|
if str(ttys_dcd) in ['True', 'true']:
|
||||||
|
LOG.info("ttys_dcd is enabled")
|
||||||
|
# check if the local flag is disabled
|
||||||
|
if not local_flag_disabled:
|
||||||
|
LOG.info("Disable (%s) local line" % active_device)
|
||||||
|
toggle_flag = 'stty -clocal -F /dev/%s' % active_device
|
||||||
|
else:
|
||||||
|
if local_flag_disabled:
|
||||||
|
# enable local flag to ignore the carrier detection
|
||||||
|
LOG.info("Enable local flag for device :%s" % active_device)
|
||||||
|
toggle_flag = 'stty clocal -F /dev/%s' % active_device
|
||||||
|
|
||||||
|
if toggle_flag:
|
||||||
|
try:
|
||||||
|
subprocess.Popen(toggle_flag, stdout=subprocess.PIPE,
|
||||||
|
shell=True)
|
||||||
|
# restart serial-getty
|
||||||
|
restart_cmd = ('systemctl restart serial-getty@%s.service'
|
||||||
|
% active_device)
|
||||||
|
subprocess.check_call(restart_cmd, shell=True)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
LOG.error("subprocess error: (%d)", e.returncode)
|
||||||
|
|
||||||
|
def _force_grub_update(self):
|
||||||
|
"""Force update the grub on the first AIO controller after the initial
|
||||||
|
config is completed
|
||||||
|
"""
|
||||||
|
if (not self._first_grub_update and
|
||||||
|
os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG)):
|
||||||
|
self._first_grub_update = True
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
    def host_lldp_get_and_report(self, context, rpcapi, host_uuid):
        neighbour_dict_array = []
        agent_dict_array = []
        neighbours = []
        agents = []

        try:
            neighbours = self._lldp_operator.lldp_neighbours_list()
        except Exception as e:
            LOG.error("Failed to get LLDP neighbours: %s", str(e))

        for neighbour in neighbours:
            neighbour_dict = {
                'name_or_uuid': neighbour.key.portname,
                'msap': neighbour.msap,
                'state': neighbour.state,
                k_lldp.LLDP_TLV_TYPE_CHASSIS_ID: neighbour.key.chassisid,
                k_lldp.LLDP_TLV_TYPE_PORT_ID: neighbour.key.portid,
                k_lldp.LLDP_TLV_TYPE_TTL: neighbour.ttl,
                k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME: neighbour.system_name,
                k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC: neighbour.system_desc,
                k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP: neighbour.capabilities,
                k_lldp.LLDP_TLV_TYPE_MGMT_ADDR: neighbour.mgmt_addr,
                k_lldp.LLDP_TLV_TYPE_PORT_DESC: neighbour.port_desc,
                k_lldp.LLDP_TLV_TYPE_DOT1_LAG: neighbour.dot1_lag,
                k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID: neighbour.dot1_port_vid,
                k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST:
                    neighbour.dot1_vid_digest,
                k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID: neighbour.dot1_mgmt_vid,
                k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS:
                    neighbour.dot1_proto_vids,
                k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS:
                    neighbour.dot1_proto_ids,
                k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES:
                    neighbour.dot1_vlan_names,
                k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS:
                    neighbour.dot3_mac_status,
                k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME:
                    neighbour.dot3_max_frame,
                k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI:
                    neighbour.dot3_power_mdi,
            }
            neighbour_dict_array.append(neighbour_dict)

        if neighbour_dict_array:
            try:
                rpcapi.lldp_neighbour_update_by_host(context,
                                                     host_uuid,
                                                     neighbour_dict_array)
            except exception.InventoryException:
                LOG.exception("Inventory Agent exception updating "
                              "lldp neighbours.")
                self._lldp_operator.lldp_neighbours_clear()

        try:
            agents = self._lldp_operator.lldp_agents_list()
        except Exception as e:
            LOG.error("Failed to get LLDP agents: %s", str(e))

        for agent in agents:
            agent_dict = {
                'name_or_uuid': agent.key.portname,
                'state': agent.state,
                'status': agent.status,
                k_lldp.LLDP_TLV_TYPE_CHASSIS_ID: agent.key.chassisid,
                k_lldp.LLDP_TLV_TYPE_PORT_ID: agent.key.portid,
                k_lldp.LLDP_TLV_TYPE_TTL: agent.ttl,
                k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME: agent.system_name,
                k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC: agent.system_desc,
                k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP: agent.capabilities,
                k_lldp.LLDP_TLV_TYPE_MGMT_ADDR: agent.mgmt_addr,
                k_lldp.LLDP_TLV_TYPE_PORT_DESC: agent.port_desc,
                k_lldp.LLDP_TLV_TYPE_DOT1_LAG: agent.dot1_lag,
                k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES: agent.dot1_vlan_names,
                k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME: agent.dot3_max_frame,
            }
            agent_dict_array.append(agent_dict)

        if agent_dict_array:
            try:
                rpcapi.lldp_agent_update_by_host(context,
                                                 host_uuid,
                                                 agent_dict_array)
            except exception.InventoryException:
                LOG.exception("Inventory Agent exception updating "
                              "lldp agents.")
                self._lldp_operator.lldp_agents_clear()

    def synchronized_network_config(func):
        """Synchronization decorator to acquire and release
        network_config_lock.
        """
        def wrap(self, *args, **kwargs):
            # initialize so the finally clause cannot hit an unbound name
            # if lock acquisition itself raises
            lockfd = 0
            try:
                # Get lock to avoid conflict with apply_network_config.sh
                lockfd = self._acquire_network_config_lock()
                return func(self, *args, **kwargs)
            finally:
                self._release_network_config_lock(lockfd)
        return wrap

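    # Editor's note (illustrative sketch, not part of this commit): any
    # instance method decorated with the wrapper above runs with the
    # network config lock held for its full duration, e.g.:
    #
    #     @synchronized_network_config
    #     def _example_reconfigure(self, context):
    #         ...  # body executes between lock acquire and release
    #
    # The finally clause guarantees the release even when the wrapped
    # method raises. The method name here is hypothetical.
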
    @synchronized_network_config
    def _lldp_enable_and_report(self, context, rpcapi, host_uuid):
        """Temporarily enable interfaces and get lldp neighbor information.
        This method should only be called before
        INITIAL_CONFIG_COMPLETE_FLAG is set.
        """
        links_down = []
        try:
            # Turn on interfaces, so that lldpd can show all neighbors
            for interface in self._ipci_operator.pci_get_net_names():
                flag = self._ipci_operator.pci_get_net_flags(interface)
                # If administrative state is down, bring it up momentarily
                if not (flag & pci.IFF_UP):
                    subprocess.call(['ip', 'link', 'set', interface, 'up'])
                    links_down.append(interface)
                    LOG.info('interface %s enabled to receive LLDP PDUs' %
                             interface)
            self._lldp_operator.lldp_update()

            # delay maximum 30 seconds for lldpd to receive LLDP PDU
            timeout = 0
            link_wait_for_lldp = True
            while timeout < 30 and link_wait_for_lldp and links_down:
                time.sleep(5)
                timeout = timeout + 5
                link_wait_for_lldp = False

                for link in links_down:
                    if not self._lldp_operator.lldp_has_neighbour(link):
                        link_wait_for_lldp = True
                        break
            self.host_lldp_get_and_report(context, rpcapi, host_uuid)
        except Exception as e:
            LOG.exception(e)
        finally:
            # restore interface administrative state
            for interface in links_down:
                subprocess.call(['ip', 'link', 'set', interface, 'down'])
                LOG.info('interface %s disabled after querying LLDP neighbors'
                         % interface)

    def platform_update_by_host(self, rpcapi, context, host_uuid, msg_dict):
        """Update host platform information.

        If this is the first boot (kickstart), then also update the Host
        Action State to reinstalled, and remove the flag.
        """
        if os.path.exists(FIRST_BOOT_FLAG):
            msg_dict.update({k_host.HOST_ACTION_STATE:
                             k_host.HAS_REINSTALLED})

        try:
            rpcapi.platform_update_by_host(context,
                                           host_uuid,
                                           msg_dict)
            if os.path.exists(FIRST_BOOT_FLAG):
                os.remove(FIRST_BOOT_FLAG)
                LOG.info("Removed %s" % FIRST_BOOT_FLAG)
        except exception.InventoryException:
            LOG.warn("platform_update_by_host exception "
                     "host_uuid=%s msg_dict=%s." %
                     (host_uuid, msg_dict))

        LOG.info("Inventory Agent platform update by host: %s" % msg_dict)

    def _acquire_network_config_lock(self):
        """Synchronization with apply_network_config.sh

        This method is to acquire the lock to avoid
        conflict with execution of apply_network_config.sh
        during puppet manifest application.

        :returns: fd of the lock, if successful. 0 on error.
        """
        lock_file_fd = os.open(
            constants.NETWORK_CONFIG_LOCK_FILE, os.O_CREAT | os.O_RDONLY)
        count = 1
        delay = 5
        max_count = 5
        while count <= max_count:
            try:
                fcntl.flock(lock_file_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                return lock_file_fd
            except IOError as e:
                # raise on unrelated IOErrors
                if e.errno != errno.EAGAIN:
                    raise
                else:
                    LOG.info("Could not acquire lock({}): {} ({}/{}), "
                             "will retry".format(lock_file_fd, str(e),
                                                 count, max_count))
                    time.sleep(delay)
                    count += 1
        LOG.error("Failed to acquire lock (fd={})".format(lock_file_fd))
        return 0

    def _release_network_config_lock(self, lockfd):
        """Release the lock guarding apply_network_config.sh """
        if lockfd:
            fcntl.flock(lockfd, fcntl.LOCK_UN)
            os.close(lockfd)

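    # Editor's sketch (illustration, not part of this commit): the
    # acquire/release pair above boils down to this non-blocking
    # flock-with-retry pattern:
    #
    #     fd = os.open(lock_path, os.O_CREAT | os.O_RDONLY)
    #     for attempt in range(5):
    #         try:
    #             fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    #             break              # lock held; holder must LOCK_UN + close
    #         except IOError as e:
    #             if e.errno != errno.EAGAIN:
    #                 raise          # unrelated I/O error
    #             time.sleep(5)      # lock busy; retry
    #
    # lock_path is a placeholder for NETWORK_CONFIG_LOCK_FILE.
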
    def ihost_inv_get_and_report(self, icontext):
        """Collect data for an ihost.

        This method allows the ihost data to be collected.

        :param: icontext: an admin context
        :returns: updated ihost object, including all fields.
        """

        rpcapi = conductor_rpcapi.ConductorAPI(
            topic=conductor_rpcapi.MANAGER_TOPIC)

        ihost = None

        # find the list of network-related inics for this ihost
        inics = self._ipci_operator.inics_get()

        # create an array of ports for each net entry of the NIC device
        iports = []
        for inic in inics:
            lockfd = 0
            try:
                # Get lock to avoid conflict with apply_network_config.sh
                lockfd = self._acquire_network_config_lock()
                pci_net_array = \
                    self._ipci_operator.pci_get_net_attrs(inic.pciaddr)
            finally:
                self._release_network_config_lock(lockfd)
            for net in pci_net_array:
                iports.append(pci.Port(inic, **net))

        # find the list of pci devices for this host
        pci_devices = self._ipci_operator.pci_devices_get()

        # create an array of pci_devs for each net entry of the device
        pci_devs = []
        for pci_dev in pci_devices:
            pci_dev_array = \
                self._ipci_operator.pci_get_device_attrs(pci_dev.pciaddr)
            for dev in pci_dev_array:
                pci_devs.append(pci.PCIDevice(pci_dev, **dev))

        # create a list of MAC addresses that will be used to identify the
        # inventoried host (one of the MACs should be the management MAC)
        host_macs = [port.mac for port in iports if port.mac]

        # get my ihost record, which should be available since the host
        # has booted
        LOG.debug('Inventory Agent iports={}, host_macs={}'.format(
            iports, host_macs))

        slept = 0
        while slept < MAXSLEEP:
            # wait for the controller to come up first; this may be a DOR
            try:
                ihost = rpcapi.get_host_by_macs(icontext, host_macs)
            except messaging.MessagingTimeout:
                LOG.info("get_host_by_macs Messaging Timeout.")
            except Exception as ex:
                LOG.warn("Conductor RPC get_host_by_macs exception "
                         "response %s" % ex)
            if not ihost:
                hostname = socket.gethostname()
                if hostname != k_host.LOCALHOST_HOSTNAME:
                    try:
                        ihost = rpcapi.get_host_by_hostname(icontext,
                                                            hostname)
                    except messaging.MessagingTimeout:
                        LOG.info("get_host_by_hostname Messaging Timeout.")
                        return  # wait for next audit cycle
                    except Exception as ex:
                        LOG.warn("Conductor RPC get_host_by_hostname "
                                 "exception response %s" % ex)

            if ihost and ihost.get('personality'):
                self.report_to_conductor_required = True
                self._ihost_uuid = ihost['uuid']
                self._ihost_personality = ihost['personality']

                if os.path.isfile(tsc.PLATFORM_CONF_FILE):
                    # read the platform config file and check for UUID
                    found = False
                    with open(tsc.PLATFORM_CONF_FILE, "r") as fd:
                        for line in fd:
                            if line.find("UUID=") == 0:
                                found = True
                    if not found:
                        # the UUID was not found, append it
                        with open(tsc.PLATFORM_CONF_FILE, "a") as fd:
                            fd.write("UUID=" + self._ihost_uuid + "\n")

                # Report host install status
                msg_dict = {}
                self.platform_update_by_host(rpcapi,
                                             icontext,
                                             self._ihost_uuid,
                                             msg_dict)
                LOG.info("Agent found matching ihost: %s" % ihost['uuid'])
                break

            time.sleep(30)
            slept += 30

        if not self.report_to_conductor_required:
            # let the audit take care of it instead
            LOG.info("Inventory no matching ihost found... await Audit")
            return

        subfunctions = self.subfunctions_get()
        try:
            rpcapi.subfunctions_update_by_host(icontext,
                                               ihost['uuid'],
                                               subfunctions)
        except exception.InventoryException:
            LOG.exception("Inventory Agent exception updating "
                          "subfunctions conductor.")

        # post to inventory db by ihost['uuid']
        iport_dict_array = []
        for port in iports:
            inic_dict = {'pciaddr': port.ipci.pciaddr,
                         'pclass': port.ipci.pclass,
                         'pvendor': port.ipci.pvendor,
                         'pdevice': port.ipci.pdevice,
                         'prevision': port.ipci.prevision,
                         'psvendor': port.ipci.psvendor,
                         'psdevice': port.ipci.psdevice,
                         'pname': port.name,
                         'numa_node': port.numa_node,
                         'sriov_totalvfs': port.sriov_totalvfs,
                         'sriov_numvfs': port.sriov_numvfs,
                         'sriov_vfs_pci_address': port.sriov_vfs_pci_address,
                         'driver': port.driver,
                         'mac': port.mac,
                         'mtu': port.mtu,
                         'speed': port.speed,
                         'link_mode': port.link_mode,
                         'dev_id': port.dev_id,
                         'dpdksupport': port.dpdksupport}

            LOG.debug('Inventory Agent inic {}'.format(inic_dict))

            iport_dict_array.append(inic_dict)
        try:
            # may get duplicate key if already sent on earlier init
            rpcapi.port_update_by_host(icontext,
                                       ihost['uuid'],
                                       iport_dict_array)
        except messaging.MessagingTimeout:
            LOG.info("port_update_by_host Messaging Timeout.")
            self.report_to_conductor_required = False
            return  # wait for next audit cycle

        # post to inventory db by ihost['uuid']
        pci_device_dict_array = []
        for dev in pci_devs:
            pci_dev_dict = {'name': dev.name,
                            'pciaddr': dev.pci.pciaddr,
                            'pclass_id': dev.pclass_id,
                            'pvendor_id': dev.pvendor_id,
                            'pdevice_id': dev.pdevice_id,
                            'pclass': dev.pci.pclass,
                            'pvendor': dev.pci.pvendor,
                            'pdevice': dev.pci.pdevice,
                            'prevision': dev.pci.prevision,
                            'psvendor': dev.pci.psvendor,
                            'psdevice': dev.pci.psdevice,
                            'numa_node': dev.numa_node,
                            'sriov_totalvfs': dev.sriov_totalvfs,
                            'sriov_numvfs': dev.sriov_numvfs,
                            'sriov_vfs_pci_address': dev.sriov_vfs_pci_address,
                            'driver': dev.driver,
                            'enabled': dev.enabled,
                            'extra_info': dev.extra_info}
            LOG.debug('Inventory Agent dev {}'.format(pci_dev_dict))

            pci_device_dict_array.append(pci_dev_dict)
        try:
            # may get duplicate key if already sent on earlier init
            rpcapi.pci_device_update_by_host(icontext,
                                             ihost['uuid'],
                                             pci_device_dict_array)
        except messaging.MessagingTimeout:
            LOG.info("pci_device_update_by_host Messaging Timeout.")
            self.report_to_conductor_required = True

        # Find the list of numa_nodes and cpus for this ihost
        inumas, icpus = self._inode_operator.inodes_get_inumas_icpus()

        try:
            # may get duplicate key if already sent on earlier init
            rpcapi.numas_update_by_host(icontext,
                                        ihost['uuid'],
                                        inumas)
        except messaging.RemoteError as e:
            LOG.error("numas_update_by_host RemoteError exc_type=%s" %
                      e.exc_type)
        except messaging.MessagingTimeout:
            LOG.info("numas_update_by_host Messaging Timeout.")
            self.report_to_conductor_required = True
        except Exception as e:
            LOG.exception("Inventory Agent exception updating inuma e=%s." % e)

        force_grub_update = self._force_grub_update()
        try:
            # may get duplicate key if already sent on earlier init
            rpcapi.cpus_update_by_host(icontext,
                                       ihost['uuid'],
                                       icpus,
                                       force_grub_update)
        except messaging.RemoteError as e:
            LOG.error("cpus_update_by_host RemoteError exc_type=%s" %
                      e.exc_type)
        except messaging.MessagingTimeout:
            LOG.info("cpus_update_by_host Messaging Timeout.")
            self.report_to_conductor_required = True
        except exception.InventoryException:
            # handle the specific exception before the generic one,
            # otherwise this clause is unreachable
            LOG.exception("Inventory exception updating cpus conductor.")
        except Exception as e:
            LOG.exception("Inventory exception updating cpus e=%s." % e)
            self.report_to_conductor_required = True

        imemory = self._inode_operator.inodes_get_imemory()
        if imemory:
            try:
                # may get duplicate key if already sent on earlier init
                rpcapi.memory_update_by_host(icontext,
                                             ihost['uuid'],
                                             imemory)
            except messaging.MessagingTimeout:
                LOG.info("memory_update_by_host Messaging Timeout.")
            except messaging.RemoteError as e:
                LOG.error("memory_update_by_host RemoteError exc_type=%s" %
                          e.exc_type)
            except exception.InventoryException:
                LOG.exception("Inventory Agent exception updating imemory "
                              "conductor.")

        if self._ihost_uuid and \
                os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG):
            if not self._report_to_conductor_iplatform_avail_flag:
                # and not self._wait_for_nova_lvg()
                imsg_dict = {'availability': k_host.AVAILABILITY_AVAILABLE}

                iscsi_initiator_name = self.get_host_iscsi_initiator_name()
                if iscsi_initiator_name is not None:
                    imsg_dict.update({'iscsi_initiator_name':
                                      iscsi_initiator_name})

                # Before setting the host to AVAILABILITY_AVAILABLE make
                # sure that nova_local aggregates are correctly set
                self.platform_update_by_host(rpcapi,
                                             icontext,
                                             self._ihost_uuid,
                                             imsg_dict)

                self._report_to_conductor_iplatform_avail()

    def subfunctions_get(self):
        """Returns the subfunctions on this host."""
        self._subfunctions = ','.join(tsc.subfunctions)
        return self._subfunctions

    @staticmethod
    def subfunctions_list_get():
        """Returns the list of subfunctions on this host."""
        subfunctions = ','.join(tsc.subfunctions)
        subfunctions_list = subfunctions.split(',')
        return subfunctions_list

    def subfunctions_configured(self, subfunctions_list):
        """Determines whether subfunctions configuration is completed.

        :return: Bool whether subfunctions configuration is completed.
        """
        if (k_host.CONTROLLER in subfunctions_list and
                k_host.COMPUTE in subfunctions_list):
            if not os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE):
                self._subfunctions_configured = False
                return False

        self._subfunctions_configured = True
        return True

    @staticmethod
    def _wait_for_nova_lvg(icontext, rpcapi, ihost_uuid, nova_lvgs=None):
        """See if we wait for a provisioned nova-local volume group.

        This method queries the conductor to see if we are provisioning
        a nova-local volume group on this boot cycle. This check is used
        to delay sending the platform availability to the conductor.

        :param: icontext: an admin context
        :param: rpcapi: conductor rpc api
        :param: ihost_uuid: the host uuid
        :returns: True if we are provisioning, False otherwise
        """
        # the log statement was previously unreachable after the return;
        # the systemconfig integration for this check is still pending
        LOG.info("TODO _wait_for_nova_lvg from systemconfig")
        return True

    def _is_config_complete(self):
        """Check if this node has completed config.

        This method queries the node's config flag files to see if it
        has completed configuration.
        :return: True if the complete flag file exists, False otherwise
        """
        if not os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG):
            return False
        subfunctions = self.subfunctions_list_get()
        if k_host.CONTROLLER in subfunctions:
            if not os.path.isfile(tsc.INITIAL_CONTROLLER_CONFIG_COMPLETE):
                return False
        if k_host.COMPUTE in subfunctions:
            if not os.path.isfile(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE):
                return False
        if k_host.STORAGE in subfunctions:
            if not os.path.isfile(tsc.INITIAL_STORAGE_CONFIG_COMPLETE):
                return False
        return True

    @periodics.periodic(spacing=CONF.agent.audit_interval,
                        run_immediately=True)
    def _agent_audit(self, context):
        # periodically, perform inventory audit
        self.agent_audit(context, host_uuid=self._ihost_uuid,
                         force_updates=None)

    def agent_audit(self, context,
                    host_uuid, force_updates, cinder_device=None):
        # perform inventory audit
        if self._ihost_uuid != host_uuid:
            # The function call is not for this host agent
            return

        icontext = mycontext.get_admin_context()
        rpcapi = conductor_rpcapi.ConductorAPI(
            topic=conductor_rpcapi.MANAGER_TOPIC)

        if not self.report_to_conductor_required:
            LOG.info("Inventory Agent audit running inv_get_and_report.")
            self.ihost_inv_get_and_report(icontext)

        if self._ihost_uuid and os.path.isfile(
                tsc.INITIAL_CONFIG_COMPLETE_FLAG):
            if (not self._report_to_conductor_iplatform_avail_flag and
                    not self._wait_for_nova_lvg(
                        icontext, rpcapi, self._ihost_uuid)):
                imsg_dict = {'availability': k_host.AVAILABILITY_AVAILABLE}

                iscsi_initiator_name = self.get_host_iscsi_initiator_name()
                if iscsi_initiator_name is not None:
                    imsg_dict.update({'iscsi_initiator_name':
                                      iscsi_initiator_name})

                # Before setting the host to AVAILABILITY_AVAILABLE make
                # sure that nova_local aggregates are correctly set
                self.platform_update_by_host(rpcapi,
                                             icontext,
                                             self._ihost_uuid,
                                             imsg_dict)

                self._report_to_conductor_iplatform_avail()

        if (self._ihost_personality == k_host.CONTROLLER and
                not self._notify_subfunctions_alarm_clear):

            subfunctions_list = self.subfunctions_list_get()
            if ((k_host.CONTROLLER in subfunctions_list) and
                    (k_host.COMPUTE in subfunctions_list)):
                if self.subfunctions_configured(subfunctions_list) and \
                        not self._wait_for_nova_lvg(
                            icontext, rpcapi, self._ihost_uuid):

                    ihost_notify_dict = {'subfunctions_configured': True}
                    rpcapi.notify_subfunctions_config(icontext,
                                                      self._ihost_uuid,
                                                      ihost_notify_dict)
                    self._notify_subfunctions_alarm_clear = True
                else:
                    if not self._notify_subfunctions_alarm_raise:
                        ihost_notify_dict = {'subfunctions_configured':
                                             False}
                        rpcapi.notify_subfunctions_config(
                            icontext, self._ihost_uuid, ihost_notify_dict)
                        self._notify_subfunctions_alarm_raise = True
            else:
                self._notify_subfunctions_alarm_clear = True

        if self._ihost_uuid:
            LOG.debug("Inventory Agent Audit running.")

            if force_updates:
                LOG.debug("Inventory Agent Audit force updates: (%s)" %
                          (', '.join(force_updates)))

            self._update_ttys_dcd_status(icontext, self._ihost_uuid)
            if self._agent_throttle > 5:
                # throttle updates
                self._agent_throttle = 0
                imemory = self._inode_operator.inodes_get_imemory()
                rpcapi.memory_update_by_host(icontext,
                                             self._ihost_uuid,
                                             imemory)
                if self._is_config_complete():
                    self.host_lldp_get_and_report(
                        icontext, rpcapi, self._ihost_uuid)
                else:
                    self._lldp_enable_and_report(
                        icontext, rpcapi, self._ihost_uuid)
            self._agent_throttle += 1

            if os.path.isfile(tsc.PLATFORM_CONF_FILE):
                # read the platform config file and check for UUID
                if 'UUID' not in open(tsc.PLATFORM_CONF_FILE).read():
                    # the UUID was not found, append it
                    with open(tsc.PLATFORM_CONF_FILE, "a") as fd:
                        fd.write("UUID=" + self._ihost_uuid + "\n")

    def configure_lldp_systemname(self, context, systemname):
        """Configure the systemname into the lldp agent with the supplied data.

        :param context: an admin context.
        :param systemname: the systemname
        """
        # TODO(sc): This becomes an inventory-api call via
        # systemconfig: configure_isystemname
        rpcapi = conductor_rpcapi.ConductorAPI(
            topic=conductor_rpcapi.MANAGER_TOPIC)
        # Update the lldp agent
        self._lldp_operator.lldp_update_systemname(systemname)
        # Trigger an audit to ensure the db is up to date
        self.host_lldp_get_and_report(context, rpcapi, self._ihost_uuid)

    def configure_ttys_dcd(self, context, uuid, ttys_dcd):
        """Configure the getty on the serial device.

        :param context: an admin context.
        :param uuid: the host uuid
        :param ttys_dcd: the flag to enable/disable dcd
        """
        LOG.debug("AgentManager.configure_ttys_dcd: %s %s" % (uuid, ttys_dcd))
        if self._ihost_uuid and self._ihost_uuid == uuid:
            LOG.debug("AgentManager configure getty on serial console")
            self._config_ttys_login(ttys_dcd)

    def execute_command(self, context, host_uuid, command):
        """Execute a command on behalf of inventory-conductor.

        :param context: request context
        :param host_uuid: the host uuid
        :param command: the command to execute
        """
        LOG.debug("AgentManager.execute_command: (%s)" % command)
        if self._ihost_uuid and self._ihost_uuid == host_uuid:
            LOG.info("AgentManager execute_command: (%s)" % command)
            with open(os.devnull, "w") as fnull:
                try:
                    subprocess.check_call(command, stdout=fnull, stderr=fnull)
                except subprocess.CalledProcessError as e:
                    LOG.error("Failed to execute (%s) (%d)",
                              command, e.returncode)
                except OSError as e:
                    LOG.error("Failed to execute (%s), OS error:(%d)",
                              command, e.errno)

            LOG.info("(%s) executed.", command)

    def get_host_iscsi_initiator_name(self):
        iscsi_initiator_name = None
        try:
            stdout, __ = utils.execute('cat', '/etc/iscsi/initiatorname.iscsi',
                                       run_as_root=True)
            if stdout:
                stdout = stdout.strip()
                iscsi_initiator_name = stdout.split('=')[-1]
            LOG.info("iscsi initiator name = %s" % iscsi_initiator_name)
        except Exception:
            LOG.error("Failed retrieving iscsi initiator name")

        return iscsi_initiator_name

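    # Editor's note (illustration, not part of this commit):
    # /etc/iscsi/initiatorname.iscsi typically contains a single line such as
    #     InitiatorName=iqn.1994-05.com.redhat:866a5e6c4f5
    # (the iqn value here is made up), so splitting the stripped contents on
    # '=' and taking the last element, as above, yields the bare iqn string.
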
    def update_host_memory(self, context, host_uuid):
        """Update the host memory.

        :param context: an admin context
        :param host_uuid: ihost uuid unique id
        :return: None
        """
        if self._ihost_uuid and self._ihost_uuid == host_uuid:
            rpcapi = conductor_rpcapi.ConductorAPI(
                topic=conductor_rpcapi.MANAGER_TOPIC)
            memory = self._inode_operator.inodes_get_imemory()
            rpcapi.memory_update_by_host(context,
                                         self._ihost_uuid,
                                         memory,
                                         force_update=True)
608
inventory/inventory/inventory/agent/node.py
Normal file
@@ -0,0 +1,608 @@
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# All Rights Reserved.
#

""" inventory numa node Utilities and helper functions."""

import os
from os import listdir
from os.path import isfile
from os.path import join
from oslo_log import log
import re
import subprocess
import tsconfig.tsconfig as tsc

LOG = log.getLogger(__name__)

# Defines per-socket vswitch memory requirements (in MB)
VSWITCH_MEMORY_MB = 1024

# Defines the size of one kilobyte
SIZE_KB = 1024

# Defines the size of 2 megabytes in kilobyte units
SIZE_2M_KB = 2048

# Defines the size of 1 gigabyte in kilobyte units
SIZE_1G_KB = 1048576

# Defines the size of 2 megabytes in megabyte units
SIZE_2M_MB = int(SIZE_2M_KB / SIZE_KB)

# Defines the size of 1 gigabyte in megabyte units
SIZE_1G_MB = int(SIZE_1G_KB / SIZE_KB)

# Defines the minimum size of memory for a controller node in megabyte units
CONTROLLER_MIN_MB = 6000

# Defines the minimum size of memory for a compute node in megabyte units
COMPUTE_MIN_MB = 1600

# Defines the minimum size of memory for a secondary compute node in megabyte
# units
COMPUTE_MIN_NON_0_MB = 500

class CPU(object):
    '''Class to encapsulate CPU data for System Inventory'''

    def __init__(self, cpu, numa_node, core, thread,
                 cpu_family=None, cpu_model=None, revision=None):
        '''Construct a cpu object with the given values.'''

        self.cpu = cpu
        self.numa_node = numa_node
        self.core = core
        self.thread = thread
        self.cpu_family = cpu_family
        self.cpu_model = cpu_model
        self.revision = revision
        # self.allocated_functions = mgmt (usu. 0), vswitch

    def __eq__(self, rhs):
        return (self.cpu == rhs.cpu and
                self.numa_node == rhs.numa_node and
                self.core == rhs.core and
                self.thread == rhs.thread)

    def __ne__(self, rhs):
        return (self.cpu != rhs.cpu or
                self.numa_node != rhs.numa_node or
                self.core != rhs.core or
                self.thread != rhs.thread)

    def __str__(self):
        return "%s [%s] [%s] [%s]" % (self.cpu, self.numa_node,
                                      self.core, self.thread)

    def __repr__(self):
        return "<CPU '%s'>" % str(self)

class NodeOperator(object):
    '''Class to encapsulate CPU operations for System Inventory'''

    def __init__(self):

        self.num_cpus = 0
        self.num_nodes = 0
        self.float_cpuset = 0
        self.total_memory_mb = 0
        self.free_memory_mb = 0
        self.total_memory_nodes_mb = []
        self.free_memory_nodes_mb = []
        self.topology = {}

        # self._get_cpu_topology()
        # self._get_total_memory_mb()
        # self._get_total_memory_nodes_mb()
        # self._get_free_memory_mb()
        # self._get_free_memory_nodes_mb()

    def _is_strict(self):
        with open(os.devnull, "w") as fnull:
            try:
                output = subprocess.check_output(
                    ["cat", "/proc/sys/vm/overcommit_memory"],
                    stderr=fnull)
                if int(output) == 2:
                    return True
            except subprocess.CalledProcessError as e:
                LOG.info("Failed to check for overcommit, error (%s)",
                         e.output)
        return False

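    # Editor's note (illustration, not part of this commit): with
    # /proc/sys/vm/overcommit_memory set to 2 the kernel enforces strict
    # accounting, so the memory still grantable is
    # CommitLimit - Committed_AS rather than MemFree. For example, a node
    # reporting CommitLimit 16000000 kB and Committed_AS 12500000 kB has
    # 3500000 kB (~3418 MiB) left to commit; the hugepage collection below
    # uses exactly this difference when _is_strict() returns True.
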
    def convert_range_string_to_list(self, s):
        olist = []
        s = s.strip()
        if s:
            for part in s.split(','):
                if '-' in part:
                    a, b = part.split('-')
                    a, b = int(a), int(b)
                    olist.extend(range(a, b + 1))
                else:
                    a = int(part)
                    olist.append(a)
        olist.sort()
        return olist

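    # Editor's note (illustration, not part of this commit): the expected
    # conversions, doctest-style:
    #
    #     >>> NodeOperator().convert_range_string_to_list('0-3,8,10-11')
    #     [0, 1, 2, 3, 8, 10, 11]
    #     >>> NodeOperator().convert_range_string_to_list('')
    #     []
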
    def inodes_get_inumas_icpus(self):
        '''Enumerate logical cpu topology based on parsing /proc/cpuinfo
           as function of socket_id, core_id, and thread_id. This updates
           topology.

        :param self
        :updates self.num_cpus - number of logical cpus
        :updates self.num_nodes - number of sockets; maps to number of
                 numa nodes
        :updates self.topology[socket_id][core_id][thread_id] = cpu
        :returns None
        '''
        self.num_cpus = 0
        self.num_nodes = 0
        self.topology = {}

        thread_cnt = {}
        cpu = socket_id = core_id = thread_id = -1
        re_processor = re.compile(r'^[Pp]rocessor\s+:\s+(\d+)')
        re_socket = re.compile(r'^physical id\s+:\s+(\d+)')
        re_core = re.compile(r'^core id\s+:\s+(\d+)')
        re_cpu_family = re.compile(r'^cpu family\s+:\s+(\d+)')
        re_cpu_model = re.compile(r'^model name\s+:\s+(\w+)')

        inumas = []
        icpus = []
        sockets = []

        with open('/proc/cpuinfo', 'r') as infile:
            icpu_attrs = {}

            for line in infile:
                match = re_processor.search(line)
                if match:
                    cpu = int(match.group(1))
                    socket_id = -1
                    core_id = -1
                    thread_id = -1
                    self.num_cpus += 1
                    continue

                match = re_cpu_family.search(line)
                if match:
                    name_value = [s.strip() for s in line.split(':', 1)]
                    name, value = name_value
                    icpu_attrs.update({'cpu_family': value})
                    continue

                match = re_cpu_model.search(line)
                if match:
                    name_value = [s.strip() for s in line.split(':', 1)]
                    name, value = name_value
                    icpu_attrs.update({'cpu_model': value})
                    continue

                match = re_socket.search(line)
                if match:
                    socket_id = int(match.group(1))
                    if socket_id not in sockets:
                        sockets.append(socket_id)
                        attrs = {
                            'numa_node': socket_id,
                            'capabilities': {},
                        }
                        inumas.append(attrs)
                    continue

                match = re_core.search(line)
                if match:
                    core_id = int(match.group(1))

                    if socket_id not in thread_cnt:
                        thread_cnt[socket_id] = {}
                    if core_id not in thread_cnt[socket_id]:
                        thread_cnt[socket_id][core_id] = 0
                    else:
                        thread_cnt[socket_id][core_id] += 1
                    thread_id = thread_cnt[socket_id][core_id]

                    if socket_id not in self.topology:
                        self.topology[socket_id] = {}
                    if core_id not in self.topology[socket_id]:
                        self.topology[socket_id][core_id] = {}

                    self.topology[socket_id][core_id][thread_id] = cpu
                    attrs = {
                        'cpu': cpu,
                        'numa_node': socket_id,
                        'core': core_id,
                        'thread': thread_id,
                        'capabilities': {},
                    }
                    icpu_attrs.update(attrs)
                    icpus.append(icpu_attrs)
                    icpu_attrs = {}
                    continue

        self.num_nodes = len(self.topology.keys())

        # If no topology was detected, fall back to hard-coded structures
        if self.num_nodes == 0:
            n_sockets, n_cores, n_threads = (1, int(self.num_cpus), 1)
            self.topology = {}
            for socket_id in range(n_sockets):
                self.topology[socket_id] = {}
                if socket_id not in sockets:
                    sockets.append(socket_id)
                    attrs = {
                        'numa_node': socket_id,
                        'capabilities': {},
                    }
                    inumas.append(attrs)
                for core_id in range(n_cores):
                    self.topology[socket_id][core_id] = {}
                    for thread_id in range(n_threads):
                        self.topology[socket_id][core_id][thread_id] = 0
                        attrs = {
                            'cpu': cpu,
                            'numa_node': socket_id,
                            'core': core_id,
                            'thread': thread_id,
                            'capabilities': {},
                        }
                        icpus.append(attrs)

            # Define Thread-Socket-Core order for logical cpu enumeration
            cpu = 0
            for thread_id in range(n_threads):
                for core_id in range(n_cores):
                    for socket_id in range(n_sockets):
                        if socket_id not in sockets:
                            sockets.append(socket_id)
                            attrs = {
                                'numa_node': socket_id,
                                'capabilities': {},
                            }
                            inumas.append(attrs)
                        self.topology[socket_id][core_id][thread_id] = cpu
                        attrs = {
                            'cpu': cpu,
                            'numa_node': socket_id,
                            'core': core_id,
                            'thread': thread_id,
                            'capabilities': {},
                        }
                        icpus.append(attrs)
                        cpu += 1
            self.num_nodes = len(self.topology.keys())

        LOG.debug("inumas= %s, cpus = %s" % (inumas, icpus))

        return inumas, icpus

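    # Editor's note (hypothetical illustration, not part of this commit):
    # on a 1-socket, 2-core, hyperthreaded machine whose /proc/cpuinfo
    # lists logical cpus 0..3 as (socket 0, core 0), (socket 0, core 1),
    # (socket 0, core 0), (socket 0, core 1), the parse above could yield
    #     self.topology == {0: {0: {0: 0, 1: 2}, 1: {0: 1, 1: 3}}}
    # i.e. topology[socket_id][core_id][thread_id] -> logical cpu number.
    # The exact mapping depends on the firmware's enumeration order.
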
    def _get_immediate_subdirs(self, dir):
        return [name for name in listdir(dir)
                if os.path.isdir(join(dir, name))]

    def _inode_get_memory_hugepages(self):
        """Collect hugepage info, including vswitch, and vm.
        Collect platform reserved if config.
        :param self
        :returns list of memory nodes and attributes
        """

        imemory = []

        initial_compute_config_completed = \
            os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE)

        # check if this is the initial report before the huge pages are
        # allocated
        initial_report = not initial_compute_config_completed

        # do not send a report if the initial compute config is completed and
        # compute config has not finished, i.e. during a subsequent
        # reboot before the manifest allocates the huge pages
        compute_config_completed = \
            os.path.exists(tsc.VOLATILE_COMPUTE_CONFIG_COMPLETE)
        if (initial_compute_config_completed and
                not compute_config_completed):
            return imemory

        for node in range(self.num_nodes):
            attr = {}
            total_hp_mb = 0  # Total memory (MB) currently configured in HPs
            free_hp_mb = 0

            # Check vswitch and libvirt memory
            # Loop through configured hugepage sizes of this node and record
            # total number and number free
            hugepages = "/sys/devices/system/node/node%d/hugepages" % node

            try:
                subdirs = self._get_immediate_subdirs(hugepages)

                for subdir in subdirs:
                    hp_attr = {}
                    sizesplit = subdir.split('-')
                    if sizesplit[1].startswith("1048576kB"):
                        size = SIZE_1G_MB
                    else:
                        size = SIZE_2M_MB

                    nr_hugepages = 0
                    free_hugepages = 0

                    mydir = hugepages + '/' + subdir
                    files = [f for f in listdir(mydir)
                             if isfile(join(mydir, f))]

                    if files:
                        for file in files:
                            with open(mydir + '/' + file, 'r') as f:
                                if file.startswith("nr_hugepages"):
                                    nr_hugepages = int(f.readline())
                                if file.startswith("free_hugepages"):
                                    free_hugepages = int(f.readline())

                    total_hp_mb = total_hp_mb + int(nr_hugepages * size)
                    free_hp_mb = free_hp_mb + int(free_hugepages * size)

                    # Libvirt hugepages can be 1G and 2M
                    if size == SIZE_1G_MB:
                        vswitch_hugepages_nr = VSWITCH_MEMORY_MB / size
                        hp_attr = {
                            'vswitch_hugepages_size_mib': size,
                            'vswitch_hugepages_nr': vswitch_hugepages_nr,
                            'vswitch_hugepages_avail': 0,
                            'vm_hugepages_nr_1G':
                                (nr_hugepages - vswitch_hugepages_nr),
                            'vm_hugepages_avail_1G': free_hugepages,
                            'vm_hugepages_use_1G': 'True'
                        }
                    else:
                        if len(subdirs) == 1:
                            # No 1G hugepage support.
                            vswitch_hugepages_nr = VSWITCH_MEMORY_MB / size
                            hp_attr = {
                                'vswitch_hugepages_size_mib': size,
                                'vswitch_hugepages_nr': vswitch_hugepages_nr,
                                'vswitch_hugepages_avail': 0,
                            }
                            hp_attr.update({'vm_hugepages_use_1G': 'False'})
                        else:
                            # vswitch will use 1G hugepages
                            vswitch_hugepages_nr = 0

                        hp_attr.update({
                            'vm_hugepages_avail_2M': free_hugepages,
                            'vm_hugepages_nr_2M':
                                (nr_hugepages - vswitch_hugepages_nr)
                        })

                    attr.update(hp_attr)

            except IOError:
                # silently ignore IO errors (eg. file missing)
                pass

            # Get the free and total memory from meminfo for this node
            re_node_memtotal = re.compile(r'^Node\s+\d+\s+MemTotal:\s+(\d+)')
            re_node_memfree = re.compile(r'^Node\s+\d+\s+MemFree:\s+(\d+)')
            re_node_filepages = \
                re.compile(r'^Node\s+\d+\s+FilePages:\s+(\d+)')
            re_node_sreclaim = \
                re.compile(r'^Node\s+\d+\s+SReclaimable:\s+(\d+)')
            re_node_commitlimit = \
                re.compile(r'^Node\s+\d+\s+CommitLimit:\s+(\d+)')
            re_node_committed_as = \
                re.compile(r'^Node\s+\d+\s+Committed_AS:\s+(\d+)')

            free_kb = 0  # Free Memory (KB) available
            total_kb = 0  # Total Memory (KB)
            limit = 0  # only used in strict accounting
            committed = 0  # only used in strict accounting

            meminfo = "/sys/devices/system/node/node%d/meminfo" % node
            try:
                with open(meminfo, 'r') as infile:
                    for line in infile:
                        match = re_node_memtotal.search(line)
                        if match:
                            total_kb += int(match.group(1))
                            continue
                        match = re_node_memfree.search(line)
                        if match:
                            free_kb += int(match.group(1))
                            continue
                        match = re_node_filepages.search(line)
                        if match:
                            free_kb += int(match.group(1))
                            continue
                        match = re_node_sreclaim.search(line)
                        if match:
                            free_kb += int(match.group(1))
                            continue
                        match = re_node_commitlimit.search(line)
                        if match:
                            limit = int(match.group(1))
                            continue
                        match = re_node_committed_as.search(line)
                        if match:
                            committed = int(match.group(1))
                            continue

                if self._is_strict():
                    free_kb = limit - committed

            except IOError:
                # silently ignore IO errors (eg. file missing)
                pass

            # Calculate PSS
            pss_mb = 0
            if node == 0:
                cmd = 'cat /proc/*/smaps 2>/dev/null | awk \'/^Pss:/ ' \
                      '{a += $2;} END {printf "%d\\n", a/1024.0;}\''
                try:
                    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                            shell=True)
                    result = proc.stdout.read().strip()
                    pss_mb = int(result)
                except subprocess.CalledProcessError as e:
                    LOG.error("Cannot calculate PSS (%s) (%d)", cmd,
                              e.returncode)
                except OSError as e:
                    LOG.error("Failed to execute (%s) OS error (%d)", cmd,
                              e.errno)

            # need to multiply total_mb by 1024 to match compute_huge
            node_total_kb = total_hp_mb * SIZE_KB + free_kb + pss_mb * SIZE_KB

            # Read base memory from compute_reserved.conf
            base_mem_mb = 0
            with open('/etc/nova/compute_reserved.conf', 'r') as infile:
                for line in infile:
                    if "COMPUTE_BASE_RESERVED" in line:
                        val = line.split("=")
                        base_reserves = val[1].strip('\n')[1:-1]
                        for reserve in base_reserves.split():
                            reserve = reserve.split(":")
                            if reserve[0].strip('"') == "node%d" % node:
                                base_mem_mb = int(reserve[1].strip('MB'))

            # On small systems, clip memory overhead to more reasonable
            # minimal settings
            if (total_kb / SIZE_KB - base_mem_mb) < 1000:
                if node == 0:
                    base_mem_mb = COMPUTE_MIN_MB
                    if tsc.nodetype == 'controller':
                        base_mem_mb += CONTROLLER_MIN_MB
                else:
                    base_mem_mb = COMPUTE_MIN_NON_0_MB

            eng_kb = node_total_kb - base_mem_mb * SIZE_KB

            vswitch_mem_kb = (attr.get('vswitch_hugepages_size_mib', 0) *
                              attr.get('vswitch_hugepages_nr', 0) * SIZE_KB)

            vm_kb = (eng_kb - vswitch_mem_kb)

            max_vm_pages_2mb = vm_kb / SIZE_2M_KB
            max_vm_pages_1gb = vm_kb / SIZE_1G_KB

            attr.update({
                'vm_hugepages_possible_2M': max_vm_pages_2mb,
                'vm_hugepages_possible_1G': max_vm_pages_1gb,
            })

            # calculate 90% of the 2M pages if this is the initial report and
            # the huge pages have not been allocated
            if initial_report:
                max_vm_pages_2mb = max_vm_pages_2mb * 0.9
                total_hp_mb += int(max_vm_pages_2mb * (SIZE_2M_KB / SIZE_KB))
                free_hp_mb = total_hp_mb
                attr.update({
                    'vm_hugepages_nr_2M': max_vm_pages_2mb,
                    'vm_hugepages_avail_2M': max_vm_pages_2mb,
                    'vm_hugepages_nr_1G': 0
                })

            attr.update({
                'numa_node': node,
                'memtotal_mib': total_hp_mb,
                'memavail_mib': free_hp_mb,
                'hugepages_configured': 'True',
                'node_memtotal_mib': node_total_kb / 1024,
            })

            imemory.append(attr)

        return imemory

    def _inode_get_memory_nonhugepages(self):
        '''Collect nonhugepage info, including platform reserved if config.
        :param self
        :returns list of memory nodes and attributes
        '''

        imemory = []
        self.total_memory_mb = 0

        re_node_memtotal = re.compile(r'^Node\s+\d+\s+MemTotal:\s+(\d+)')
        re_node_memfree = re.compile(r'^Node\s+\d+\s+MemFree:\s+(\d+)')
        re_node_filepages = re.compile(r'^Node\s+\d+\s+FilePages:\s+(\d+)')
        re_node_sreclaim = re.compile(r'^Node\s+\d+\s+SReclaimable:\s+(\d+)')

        for node in range(self.num_nodes):
            attr = {}
            total_mb = 0
            free_mb = 0

            meminfo = "/sys/devices/system/node/node%d/meminfo" % node
            try:
                with open(meminfo, 'r') as infile:
                    for line in infile:
                        match = re_node_memtotal.search(line)
                        if match:
                            total_mb += int(match.group(1))
                            continue

                        match = re_node_memfree.search(line)
                        if match:
                            free_mb += int(match.group(1))
                            continue
                        match = re_node_filepages.search(line)
                        if match:
                            free_mb += int(match.group(1))
                            continue
                        match = re_node_sreclaim.search(line)
                        if match:
                            free_mb += int(match.group(1))
                            continue

            except IOError:
                # silently ignore IO errors (eg. file missing)
                pass

            total_mb /= 1024
            free_mb /= 1024
            self.total_memory_nodes_mb.append(total_mb)
            attr = {
                'numa_node': node,
                'memtotal_mib': total_mb,
                'memavail_mib': free_mb,
                'hugepages_configured': 'False',
            }

            imemory.append(attr)

        return imemory

    def inodes_get_imemory(self):
        '''Enumerate logical memory topology based on:
              if CONF.compute_hugepages:
                  self._inode_get_memory_hugepages()
              else:
                  self._inode_get_memory_nonhugepages()

        :param self
        :returns list of memory nodes and attributes
        '''
        imemory = []

        # if CONF.compute_hugepages:
        if os.path.isfile("/etc/nova/compute_reserved.conf"):
            imemory = self._inode_get_memory_hugepages()
        else:
            imemory = self._inode_get_memory_nonhugepages()

        LOG.debug("imemory= %s" % imemory)

        return imemory
621
inventory/inventory/inventory/agent/pci.py
Normal file
@@ -0,0 +1,621 @@
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# All Rights Reserved.
#

""" inventory pci Utilities and helper functions."""

import glob
import os
import shlex
import subprocess

from inventory.common import k_pci
from inventory.common import utils
from oslo_log import log

LOG = log.getLogger(__name__)

# Look for PCI class 0x0200 and 0x0280 so that we get generic ethernet
# controllers and those that may report as "other" network controllers.
ETHERNET_PCI_CLASSES = ['ethernet controller', 'network controller']

# Look for other devices we may want to inventory.
KNOWN_PCI_DEVICES = [
    {"vendor_id": k_pci.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
     "device_id": k_pci.NOVA_PCI_ALIAS_QAT_DH895XCC_PF_DEVICE,
     "class_id": k_pci.NOVA_PCI_ALIAS_QAT_CLASS},
    {"vendor_id": k_pci.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
     "device_id": k_pci.NOVA_PCI_ALIAS_QAT_C62X_PF_DEVICE,
     "class_id": k_pci.NOVA_PCI_ALIAS_QAT_CLASS},
    {"class_id": k_pci.NOVA_PCI_ALIAS_GPU_CLASS}]

# PCI-SIG 0x06 bridge devices to not inventory.
IGNORE_BRIDGE_PCI_CLASSES = ['bridge', 'isa bridge', 'host bridge']

# PCI-SIG 0x08 generic peripheral devices to not inventory.
IGNORE_PERIPHERAL_PCI_CLASSES = ['system peripheral', 'pic', 'dma controller',
                                 'iommu', 'rtc']

# PCI-SIG 0x11 signal processing devices to not inventory.
IGNORE_SIGNAL_PROCESSING_PCI_CLASSES = ['performance counters']

# Blacklist of devices we do not want to inventory, because they are dealt
# with separately (ie. Ethernet devices), or do not make sense to expose
# to a guest.
IGNORE_PCI_CLASSES = ETHERNET_PCI_CLASSES + IGNORE_BRIDGE_PCI_CLASSES + \
    IGNORE_PERIPHERAL_PCI_CLASSES + IGNORE_SIGNAL_PROCESSING_PCI_CLASSES

# Indexes into the fields of a parsed 'lspci -Dm' output line
pciaddr = 0
pclass = 1
pvendor = 2
pdevice = 3
prevision = 4
psvendor = 5
psdevice = 6

VALID_PORT_SPEED = ['10', '100', '1000', '10000', '40000', '100000']

# Network device flags (from include/uapi/linux/if.h)
IFF_UP = 1 << 0
IFF_BROADCAST = 1 << 1
IFF_DEBUG = 1 << 2
IFF_LOOPBACK = 1 << 3
IFF_POINTOPOINT = 1 << 4
IFF_NOTRAILERS = 1 << 5
IFF_RUNNING = 1 << 6
IFF_NOARP = 1 << 7
IFF_PROMISC = 1 << 8
IFF_ALLMULTI = 1 << 9
IFF_MASTER = 1 << 10
IFF_SLAVE = 1 << 11
IFF_MULTICAST = 1 << 12
IFF_PORTSEL = 1 << 13
IFF_AUTOMEDIA = 1 << 14
IFF_DYNAMIC = 1 << 15

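# Editor's sketch (not part of the original commit): how the IFF_UP bit can
# be tested for a named interface, assuming the standard Linux sysfs layout
# where /sys/class/net/<ifname>/flags holds the net_device flags in hex.
# The function name is illustrative only.
def _example_interface_is_up(ifname):
    # flags is a hex string such as '0x1003'; bit 0 is IFF_UP
    with open('/sys/class/net/%s/flags' % ifname) as f:
        flags = int(f.read().strip(), 16)
    return bool(flags & IFF_UP)
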
class PCI(object):
    '''Class to encapsulate PCI data for System Inventory'''

    def __init__(self, pciaddr, pclass, pvendor, pdevice, prevision,
                 psvendor, psdevice):
        '''Construct a pci object with the given values.'''

        self.pciaddr = pciaddr
        self.pclass = pclass
        self.pvendor = pvendor
        self.pdevice = pdevice
        self.prevision = prevision
        self.psvendor = psvendor
        self.psdevice = psdevice

    def __eq__(self, rhs):
        return (self.pvendor == rhs.pvendor and
                self.pdevice == rhs.pdevice)

    def __ne__(self, rhs):
        return (self.pvendor != rhs.pvendor or
                self.pdevice != rhs.pdevice)

    def __str__(self):
        return "%s [%s] [%s]" % (self.pciaddr, self.pvendor, self.pdevice)

    def __repr__(self):
        return "<PCI '%s'>" % str(self)

class Port(object):
    '''Class to encapsulate port data for System Inventory'''

    def __init__(self, ipci, **kwargs):
        '''Construct a port object with the given values.'''
        self.ipci = ipci
        self.name = kwargs.get('name')
        self.mac = kwargs.get('mac')
        self.mtu = kwargs.get('mtu')
        self.speed = kwargs.get('speed')
        self.link_mode = kwargs.get('link_mode')
        self.numa_node = kwargs.get('numa_node')
        self.dev_id = kwargs.get('dev_id')
        self.sriov_totalvfs = kwargs.get('sriov_totalvfs')
        self.sriov_numvfs = kwargs.get('sriov_numvfs')
        self.sriov_vfs_pci_address = kwargs.get('sriov_vfs_pci_address')
        self.driver = kwargs.get('driver')
        self.dpdksupport = kwargs.get('dpdksupport')

    def __str__(self):
        return "%s %s: [%s] [%s] [%s], [%s], [%s], [%s], [%s]" % (
            self.ipci, self.name, self.mac, self.mtu, self.speed,
            self.link_mode, self.numa_node, self.dev_id, self.dpdksupport)

    def __repr__(self):
        return "<Port '%s'>" % str(self)

class PCIDevice(object):
    '''Class to encapsulate extended PCI data for System Inventory'''

    def __init__(self, pci, **kwargs):
        '''Construct a PCIDevice object with the given values.'''
        self.pci = pci
        self.name = kwargs.get('name')
        self.pclass_id = kwargs.get('pclass_id')
        self.pvendor_id = kwargs.get('pvendor_id')
        self.pdevice_id = kwargs.get('pdevice_id')
        self.numa_node = kwargs.get('numa_node')
        self.sriov_totalvfs = kwargs.get('sriov_totalvfs')
        self.sriov_numvfs = kwargs.get('sriov_numvfs')
        self.sriov_vfs_pci_address = kwargs.get('sriov_vfs_pci_address')
        self.driver = kwargs.get('driver')
        self.enabled = kwargs.get('enabled')
        self.extra_info = kwargs.get('extra_info')

    def __str__(self):
        return "%s %s: [%s]" % (
            self.pci, self.numa_node, self.driver)

    def __repr__(self):
        return "<PCIDevice '%s'>" % str(self)

class PCIOperator(object):
    '''Class to encapsulate PCI operations for System Inventory'''

    def format_lspci_output(self, device):
        # hack for now
        if device[prevision].strip() == device[pvendor].strip():
            # no revision info
            device.append(device[psvendor])
            device[psvendor] = device[prevision]
            device[prevision] = "0"
        elif len(device) <= 6:  # one less entry, no revision
            LOG.debug("update psdevice length=%s" % len(device))
            device.append(device[psvendor])
        return device

    def get_pci_numa_node(self, pciaddr):
        fnuma_node = '/sys/bus/pci/devices/' + pciaddr + '/numa_node'
        try:
            with open(fnuma_node, 'r') as f:
                numa_node = f.readline().strip()
            LOG.debug("ATTR numa_node: %s " % numa_node)
        except Exception:
            LOG.debug("ATTR numa_node unknown for: %s " % pciaddr)
            numa_node = None
        return numa_node

    def get_pci_sriov_totalvfs(self, pciaddr):
        fsriov_totalvfs = '/sys/bus/pci/devices/' + pciaddr + '/sriov_totalvfs'
        try:
            with open(fsriov_totalvfs, 'r') as f:
                sriov_totalvfs = f.readline()
            LOG.debug("ATTR sriov_totalvfs: %s " % sriov_totalvfs)
        except Exception:
            LOG.debug("ATTR sriov_totalvfs unknown for: %s " % pciaddr)
            sriov_totalvfs = None
        return sriov_totalvfs

    def get_pci_sriov_numvfs(self, pciaddr):
        fsriov_numvfs = '/sys/bus/pci/devices/' + pciaddr + '/sriov_numvfs'
        try:
            with open(fsriov_numvfs, 'r') as f:
                sriov_numvfs = f.readline()
            LOG.debug("ATTR sriov_numvfs: %s " % sriov_numvfs)
        except Exception:
            LOG.debug("ATTR sriov_numvfs unknown for: %s " % pciaddr)
            sriov_numvfs = 0
        LOG.debug("sriov_numvfs: %s" % sriov_numvfs)
        return sriov_numvfs

    def get_pci_sriov_vfs_pci_address(self, pciaddr, sriov_numvfs):
        dirpcidev = '/sys/bus/pci/devices/' + pciaddr
        sriov_vfs_pci_address = []
        i = 0
        while i < int(sriov_numvfs):
            lvf = dirpcidev + '/virtfn' + str(i)
            try:
                sriov_vfs_pci_address.append(
                    os.path.basename(os.readlink(lvf)))
            except Exception:
                LOG.warning("virtfn link %s non-existent (sriov_numvfs=%s)"
                            % (lvf, sriov_numvfs))
            i += 1
        LOG.debug("sriov_vfs_pci_address: %s" % sriov_vfs_pci_address)
        return sriov_vfs_pci_address

    def get_pci_driver_name(self, pciaddr):
        ddriver = '/sys/bus/pci/devices/' + pciaddr + '/driver/module/drivers'
        try:
            drivers = [
                os.path.basename(os.readlink(ddriver + '/' + d))
                for d in os.listdir(ddriver)]
            driver = str(','.join(str(d) for d in drivers))
        except Exception:
            LOG.debug("ATTR driver unknown for: %s " % pciaddr)
            driver = None
        LOG.debug("driver: %s" % driver)
        return driver

    def pci_devices_get(self):
        p = subprocess.Popen(["lspci", "-Dm"], stdout=subprocess.PIPE)

        pci_devices = []
        for line in p.stdout:
            pci_device = shlex.split(line.strip())
            pci_device = self.format_lspci_output(pci_device)

            if any(x in pci_device[pclass].lower() for x in
                   IGNORE_PCI_CLASSES):
                continue

            dirpcidev = '/sys/bus/pci/devices/'
            physfn = dirpcidev + pci_device[pciaddr] + '/physfn'
            if not os.path.isdir(physfn):
                # Do not report VFs
                pci_devices.append(PCI(pci_device[pciaddr],
                                       pci_device[pclass],
                                       pci_device[pvendor],
                                       pci_device[pdevice],
                                       pci_device[prevision],
                                       pci_device[psvendor],
                                       pci_device[psdevice]))

        p.wait()

        return pci_devices

    def inics_get(self):
        p = subprocess.Popen(["lspci", "-Dm"], stdout=subprocess.PIPE)

        pci_inics = []
        for line in p.stdout:
            inic = shlex.split(line.strip())
            if any(x in inic[pclass].lower() for x in ETHERNET_PCI_CLASSES):
                # hack for now
                if inic[prevision].strip() == inic[pvendor].strip():
                    # no revision info
                    inic.append(inic[psvendor])
                    inic[psvendor] = inic[prevision]
                    inic[prevision] = "0"
                elif len(inic) <= 6:  # one less entry, no revision
                    LOG.debug("update psdevice length=%s" % len(inic))
                    inic.append(inic[psvendor])

                dirpcidev = '/sys/bus/pci/devices/'
                physfn = dirpcidev + inic[pciaddr] + '/physfn'
                if os.path.isdir(physfn):
                    # Do not report VFs
                    continue
                pci_inics.append(PCI(inic[pciaddr], inic[pclass],
                                     inic[pvendor], inic[pdevice],
                                     inic[prevision], inic[psvendor],
                                     inic[psdevice]))

        p.wait()

        return pci_inics

    def pci_get_enabled_attr(self, class_id, vendor_id, product_id):
        for known_device in KNOWN_PCI_DEVICES:
            if (class_id == known_device.get("class_id", None) or
                    (vendor_id == known_device.get("vendor_id", None) and
                     product_id == known_device.get("device_id", None))):
                return True
        return False

    def pci_get_device_attrs(self, pciaddr):
        """For this pciaddr, build a list of device attributes."""
        pci_attrs_array = []

        dirpcidev = '/sys/bus/pci/devices/'
        pciaddrs = os.listdir(dirpcidev)

        for a in pciaddrs:
            if ((a == pciaddr) or (a == ("0000:" + pciaddr))):
                LOG.debug("Found device pci bus: %s " % a)

                dirpcideva = dirpcidev + a

                numa_node = self.get_pci_numa_node(a)
                sriov_totalvfs = self.get_pci_sriov_totalvfs(a)
                sriov_numvfs = self.get_pci_sriov_numvfs(a)
                sriov_vfs_pci_address = \
                    self.get_pci_sriov_vfs_pci_address(a, sriov_numvfs)
                driver = self.get_pci_driver_name(a)

                fclass = dirpcideva + '/class'
                fvendor = dirpcideva + '/vendor'
                fdevice = dirpcideva + '/device'
                try:
                    with open(fvendor, 'r') as f:
                        pvendor_id = f.readline().strip('0x').strip()
                except Exception:
                    LOG.debug("ATTR vendor unknown for: %s " % a)
                    pvendor_id = None

                try:
                    with open(fdevice, 'r') as f:
                        pdevice_id = f.readline().replace('0x', '').strip()
                except Exception:
                    LOG.debug("ATTR device unknown for: %s " % a)
                    pdevice_id = None

                try:
                    with open(fclass, 'r') as f:
                        pclass_id = f.readline().replace('0x', '').strip()
                except Exception:
                    LOG.debug("ATTR class unknown for: %s " % a)
                    pclass_id = None

                name = "pci_" + a.replace(':', '_').replace('.', '_')

                attrs = {
                    "name": name,
                    "pci_address": a,
                    "pclass_id": pclass_id,
                    "pvendor_id": pvendor_id,
                    "pdevice_id": pdevice_id,
                    "numa_node": numa_node,
                    "sriov_totalvfs": sriov_totalvfs,
                    "sriov_numvfs": sriov_numvfs,
                    "sriov_vfs_pci_address":
                        ','.join(str(x) for x in sriov_vfs_pci_address),
                    "driver": driver,
                    "enabled": self.pci_get_enabled_attr(
                        pclass_id, pvendor_id, pdevice_id),
                }

                pci_attrs_array.append(attrs)

        return pci_attrs_array

    def get_pci_net_directory(self, pciaddr):
        device_directory = '/sys/bus/pci/devices/' + pciaddr
        # Look for the standard device 'net' directory
        net_directory = device_directory + '/net/'
        if os.path.exists(net_directory):
            return net_directory
        # Otherwise check whether this is a virtio based device
        net_pattern = device_directory + '/virtio*/net/'
        results = glob.glob(net_pattern)
        if not results:
            return None
        if len(results) > 1:
            LOG.warning("PCI device {} has multiple virtio "
                        "sub-directories".format(pciaddr))
        return results[0]

    def _read_flags(self, fflags):
        try:
            with open(fflags, 'r') as f:
                hex_str = f.readline().rstrip()
                flags = int(hex_str, 16)
        except Exception:
            flags = None
        return flags

    def _get_netdev_flags(self, dirpcinet, pci):
        fflags = dirpcinet + pci + '/flags'
        return self._read_flags(fflags)

    def pci_get_net_flags(self, name):
        fflags = '/sys/class/net/' + name + '/flags'
        return self._read_flags(fflags)

    def pci_get_net_names(self):
        '''Build a list of network device names.'''
        names = []
        for name in os.listdir('/sys/class/net/'):
            if os.path.isdir('/sys/class/net/' + name):
                names.append(name)
        return names

    def pci_get_net_attrs(self, pciaddr):
        """For this pciaddr, build a list of network attributes per port."""
        pci_attrs_array = []

        dirpcidev = '/sys/bus/pci/devices/'
        pciaddrs = os.listdir(dirpcidev)

        for a in pciaddrs:
            if ((a == pciaddr) or (a == ("0000:" + pciaddr))):
                # Look inside net; expect to find address, speed, mtu etc.
                # There may be more than 1 net device for this NIC.
                LOG.debug("Found NIC pci bus: %s " % a)

                dirpcideva = dirpcidev + a

                numa_node = self.get_pci_numa_node(a)
                sriov_totalvfs = self.get_pci_sriov_totalvfs(a)
                sriov_numvfs = self.get_pci_sriov_numvfs(a)
                sriov_vfs_pci_address = \
                    self.get_pci_sriov_vfs_pci_address(a, sriov_numvfs)
                driver = self.get_pci_driver_name(a)

                # Determine DPDK support
                dpdksupport = False
                fvendor = dirpcideva + '/vendor'
                fdevice = dirpcideva + '/device'
                try:
                    with open(fvendor, 'r') as f:
                        vendor = f.readline().strip()
                except Exception:
                    LOG.debug("ATTR vendor unknown for: %s " % a)
                    vendor = None

                try:
                    with open(fdevice, 'r') as f:
                        device = f.readline().strip()
                except Exception:
                    LOG.debug("ATTR device unknown for: %s " % a)
                    device = None

                try:
                    with open(os.devnull, "w") as fnull:
                        subprocess.check_call(
                            ["query_pci_id", "-v " + str(vendor),
                             "-d " + str(device)],
                            stdout=fnull, stderr=fnull)
                        dpdksupport = True
                        LOG.debug("DPDK does support NIC "
                                  "(vendor: %s device: %s)",
                                  vendor, device)
                except subprocess.CalledProcessError as e:
                    dpdksupport = False
                    if e.returncode == 1:
                        # NIC is not supported
                        LOG.debug("DPDK does not support NIC "
                                  "(vendor: %s device: %s)",
                                  vendor, device)
                    else:
                        # command failed, default DPDK support to False
                        LOG.info("Could not determine DPDK support for "
                                 "NIC (vendor %s device: %s), defaulting "
                                 "to False", vendor, device)

                # determine the net directory for this device
                dirpcinet = self.get_pci_net_directory(a)
                if dirpcinet is None:
                    LOG.warning("no /net for PCI device: %s " % a)
                    continue  # go to next PCI device

                # determine which netdevs are associated to this device
                netdevs = os.listdir(dirpcinet)
                for n in netdevs:
                    mac = None
                    fmac = dirpcinet + n + '/' + "address"
                    fmaster = dirpcinet + n + '/' + "master"
                    # if a port is a member of a bond the port MAC address
                    # must be retrieved from /proc/net/bonding/<bond_name>
                    if os.path.exists(fmaster):
                        dirmaster = os.path.realpath(fmaster)
                        master_name = os.path.basename(dirmaster)
                        procnetbonding = '/proc/net/bonding/' + master_name
                        found_interface = False

                        try:
                            with open(procnetbonding, 'r') as f:
                                for line in f:
                                    if 'Slave Interface: ' + n in line:
                                        found_interface = True
                                    if (found_interface and
                                            'Permanent HW addr:' in line):
                                        mac = line.split(': ')[1].rstrip()
                                        mac = utils.validate_and_normalize_mac(
                                            mac)
                                        break
                                if not mac:
                                    LOG.info("ATTR mac could not be determined"
                                             " for slave interface %s" % n)
                        except Exception:
                            LOG.info("ATTR mac could not be determined, "
                                     "could not open %s" % procnetbonding)
                    else:
                        try:
                            with open(fmac, 'r') as f:
                                mac = f.readline().rstrip()
                                mac = utils.validate_and_normalize_mac(mac)
                        except Exception:
                            LOG.info("ATTR mac unknown for: %s " % n)

                    fmtu = dirpcinet + n + '/' + "mtu"
                    try:
                        with open(fmtu, 'r') as f:
                            mtu = f.readline().rstrip()
                    except Exception:
                        LOG.debug("ATTR mtu unknown for: %s " % n)
                        mtu = None

                    # Check the administrative state before reading the speed
                    flags = self._get_netdev_flags(dirpcinet, n)

                    # If administrative state is down, bring it up momentarily
                    if not (flags & IFF_UP):
                        LOG.warning("Enabling device %s to query link speed" %
                                    n)
                        cmd = 'ip link set dev %s up' % n
                        subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                         shell=True)
                    # Read the speed
                    fspeed = dirpcinet + n + '/' + "speed"
                    try:
                        with open(fspeed, 'r') as f:
                            speed = f.readline().rstrip()
                            if speed not in VALID_PORT_SPEED:
                                LOG.error("Invalid port speed = %s for %s " %
                                          (speed, n))
                                speed = None
                    except Exception:
                        LOG.warning("ATTR speed unknown for: %s "
                                    "(flags: %s)" % (n, hex(flags)))
                        speed = None
                    # If the administrative state was down, take it back down
                    if not (flags & IFF_UP):
                        LOG.warning("Disabling device %s after querying "
                                    "link speed" % n)
                        cmd = 'ip link set dev %s down' % n
                        subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                         shell=True)

                    flink_mode = dirpcinet + n + '/' + "link_mode"
                    try:
                        with open(flink_mode, 'r') as f:
                            link_mode = f.readline().rstrip()
                    except Exception:
                        LOG.debug("ATTR link_mode unknown for: %s " % n)
                        link_mode = None

                    fdevport = dirpcinet + n + '/' + "dev_port"
                    try:
                        with open(fdevport, 'r') as f:
                            dev_port = int(f.readline().rstrip(), 0)
                    except Exception:
                        LOG.debug("ATTR dev_port unknown for: %s " % n)
                        # Kernel versions older than 3.15 used dev_id
                        # (incorrectly) to identify the network devices,
                        # therefore support the fallback if dev_port is not
                        # available
                        try:
                            fdevid = dirpcinet + n + '/' + "dev_id"
                            with open(fdevid, 'r') as f:
                                dev_port = int(f.readline().rstrip(), 0)
                        except Exception:
                            LOG.debug("ATTR dev_id unknown for: %s " % n)
                            dev_port = 0

                    attrs = {
                        "name": n,
                        "numa_node": numa_node,
                        "sriov_totalvfs": sriov_totalvfs,
                        "sriov_numvfs": sriov_numvfs,
                        "sriov_vfs_pci_address":
                            ','.join(str(x) for x in sriov_vfs_pci_address),
                        "driver": driver,
                        "pci_address": a,
                        "mac": mac,
                        "mtu": mtu,
                        "speed": speed,
                        "link_mode": link_mode,
                        "dev_id": dev_port,
                        "dpdksupport": dpdksupport
                    }

                    pci_attrs_array.append(attrs)

        return pci_attrs_array
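For context, a hedged sketch of how the agent side might drive PCIOperator; it uses only the helpers defined above, and the printed fields are illustrative:

    operator = PCIOperator()
    for dev in operator.pci_devices_get():      # PFs reported by lspci -Dm
        for attrs in operator.pci_get_device_attrs(dev.pciaddr):
            print(attrs['name'], attrs['numa_node'], attrs['driver'])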
161
inventory/inventory/inventory/agent/rpcapi.py
Normal file
@ -0,0 +1,161 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8

# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Client side of the agent RPC API.
"""

from oslo_log import log
import oslo_messaging as messaging

from inventory.common import rpc
from inventory.objects import base as objects_base


LOG = log.getLogger(__name__)

MANAGER_TOPIC = 'inventory.agent_manager'


class AgentAPI(object):
    """Client side of the agent RPC API.

    API version history:

        1.0 - Initial version.
    """

    RPC_API_VERSION = '1.0'

    def __init__(self, topic=None):
        super(AgentAPI, self).__init__()
        self.topic = topic
        if self.topic is None:
            self.topic = MANAGER_TOPIC
        target = messaging.Target(topic=self.topic,
                                  version='1.0')
        serializer = objects_base.InventoryObjectSerializer()
        version_cap = self.RPC_API_VERSION
        self.client = rpc.get_client(target,
                                     version_cap=version_cap,
                                     serializer=serializer)

    def host_inventory(self, context, values, topic=None):
        """Synchronously, have an agent collect inventory for this host.

        Collect ihost inventory and report to conductor.

        :param context: request context.
        :param values: dictionary with initial values for new host object
        :returns: created ihost object, including all fields.
        """
        cctxt = self.client.prepare(topic=topic or self.topic, version='1.0')
        return cctxt.call(context,
                          'host_inventory',
                          values=values)

    def configure_ttys_dcd(self, context, uuid, ttys_dcd, topic=None):
        """Asynchronously, have the agent configure the getty on the serial
        console.

        :param context: request context.
        :param uuid: the host uuid
        :param ttys_dcd: the flag to enable/disable dcd
        :returns: none ... uses asynchronous cast().
        """
        # fanout / broadcast message to all inventory agents
        LOG.debug("AgentApi.configure_ttys_dcd: fanout_cast: sending "
                  "dcd update to agent: (%s) (%s)" % (uuid, ttys_dcd))
        cctxt = self.client.prepare(topic=topic or self.topic, version='1.0',
                                    fanout=True)
        retval = cctxt.cast(context,
                            'configure_ttys_dcd',
                            uuid=uuid,
                            ttys_dcd=ttys_dcd)

        return retval

    def execute_command(self, context, host_uuid, command, topic=None):
        """Asynchronously, have the agent execute a command.

        :param context: request context.
        :param host_uuid: the host uuid
        :param command: the command to execute
        :returns: none ... uses asynchronous cast().
        """
        # fanout / broadcast message to all inventory agents
        LOG.debug("AgentApi.execute_command: fanout_cast: sending "
                  "host uuid: (%s) " % host_uuid)
        cctxt = self.client.prepare(topic=topic or self.topic, version='1.0',
                                    fanout=True)
        retval = cctxt.cast(context,
                            'execute_command',
                            host_uuid=host_uuid,
                            command=command)
        return retval

    def agent_update(self, context, host_uuid, force_updates,
                     cinder_device=None,
                     topic=None):
        """
        Asynchronously, have the agent update partitions, ipv and ilvg state.

        :param context: request context
        :param host_uuid: the host uuid
        :param force_updates: list of inventory objects to update
        :param cinder_device: device by path of cinder volumes
        :return: none ... uses asynchronous cast().
        """

        # fanout / broadcast message to all inventory agents
        LOG.info("AgentApi.agent_update: fanout_cast: sending "
                 "update request to agent for: (%s)" %
                 (', '.join(force_updates)))
        cctxt = self.client.prepare(topic=topic or self.topic, version='1.0',
                                    fanout=True)
        retval = cctxt.cast(context,
                            'agent_audit',
                            host_uuid=host_uuid,
                            force_updates=force_updates,
                            cinder_device=cinder_device)
        return retval

    def disk_format_gpt(self, context, host_uuid, idisk_dict,
                        is_cinder_device, topic=None):
        """Asynchronously, GPT format a disk.

        :param context: an admin context
        :param host_uuid: ihost uuid unique id
        :param idisk_dict: values for disk object
        :param is_cinder_device: bool value tells if the idisk is for cinder
        :returns: pass or fail
        """
        cctxt = self.client.prepare(topic=topic or self.topic, version='1.0',
                                    fanout=True)

        return cctxt.cast(context,
                          'disk_format_gpt',
                          host_uuid=host_uuid,
                          idisk_dict=idisk_dict,
                          is_cinder_device=is_cinder_device)
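A hedged caller-side sketch of this API (ctxt and host_uuid are placeholders, not part of this file):

    api = AgentAPI()                  # topic defaults to MANAGER_TOPIC
    api.configure_ttys_dcd(ctxt, uuid=host_uuid, ttys_dcd=True)  # fanout cast
    inventory = api.host_inventory(ctxt, values={'hostname': 'controller-0'})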
0
inventory/inventory/inventory/api/__init__.py
Normal file
90
inventory/inventory/inventory/api/app.py
Normal file
@ -0,0 +1,90 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from oslo_config import cfg
from oslo_log import log
from oslo_service import service
from oslo_service import wsgi
import pecan

from inventory.api import config
from inventory.api import middleware
from inventory.common.i18n import _
from inventory.common import policy

CONF = cfg.CONF

LOG = log.getLogger(__name__)

_launcher = None
_launcher_pxe = None


def get_pecan_config():
    # Set up the pecan configuration
    filename = config.__file__.replace('.pyc', '.py')
    return pecan.configuration.conf_from_file(filename)


def setup_app(config=None):
    policy.init_enforcer()

    if not config:
        config = get_pecan_config()

    pecan.configuration.set_config(dict(config), overwrite=True)
    app_conf = dict(config.app)

    app = pecan.make_app(
        app_conf.pop('root'),
        debug=CONF.debug,
        logging=getattr(config, 'logging', {}),
        force_canonical=getattr(config.app, 'force_canonical', True),
        guess_content_type_from_ext=False,
        wrap_app=middleware.ParsableErrorMiddleware,
        **app_conf
    )
    return app


def load_paste_app(app_name=None):
    """Loads a WSGI app from a paste config file."""
    if app_name is None:
        app_name = cfg.CONF.prog

    loader = wsgi.Loader(cfg.CONF)
    app = loader.load_app(app_name)
    return app


def app_factory(global_config, **local_conf):
    return setup_app()


def serve(api_service, conf, workers=1):
    global _launcher

    if _launcher:
        raise RuntimeError(_('serve() _launcher can only be called once'))

    _launcher = service.launch(conf, api_service, workers=workers)


def serve_pxe(api_service, conf, workers=1):
    global _launcher_pxe

    if _launcher_pxe:
        raise RuntimeError(_('serve() _launcher_pxe can only be called once'))

    _launcher_pxe = service.launch(conf, api_service, workers=workers)


def wait():
    _launcher.wait()


def wait_pxe():
    _launcher_pxe.wait()
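A minimal sketch of wiring these helpers into a running WSGI service, assuming an oslo_service wsgi.Server wrapper; the bind address and port are assumptions, not defined by this file:

    from oslo_service import wsgi as oslo_wsgi

    wsgi_app = setup_app()              # pecan app with the inventory hooks
    server = oslo_wsgi.Server(CONF, 'inventory-api', wsgi_app,
                              host='0.0.0.0', port=18080)  # port: assumption
    serve(server, CONF, workers=2)
    wait()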
73
inventory/inventory/inventory/api/config.py
Normal file
@ -0,0 +1,73 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


from inventory.api import hooks
from inventory.common import config
from inventory import objects
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log as logging
import pbr.version
import sys

LOG = logging.getLogger(__name__)

sysinv_group = cfg.OptGroup(
    'sysinv',
    title='Sysinv Options',
    help="Configuration options for the platform service")

sysinv_opts = [
    cfg.StrOpt('catalog_info',
               default='platform:sysinv:internalURL',
               help="Service catalog lookup info."),
    cfg.StrOpt('os_region_name',
               default='RegionOne',
               help="Region name of this node. It is used for catalog lookup"),
]

version_info = pbr.version.VersionInfo('inventory')

# Pecan Application Configurations
app = {
    'root': 'inventory.api.controllers.root.RootController',
    'modules': ['inventory.api'],
    'hooks': [
        hooks.DBHook(),
        hooks.ContextHook(),
        hooks.RPCHook(),
        hooks.SystemConfigHook(),
    ],
    'acl_public_routes': [
        '/',
        '/v1',
    ],
}


def init(args, **kwargs):
    cfg.CONF.register_group(sysinv_group)
    cfg.CONF.register_opts(sysinv_opts, group=sysinv_group)
    ks_loading.register_session_conf_options(cfg.CONF,
                                             sysinv_group.name)
    logging.register_options(cfg.CONF)

    cfg.CONF(args=args, project='inventory',
             version='%%(prog)s %s' % version_info.release_string(),
             **kwargs)
    objects.register_all()
    config.parse_args(args)


def setup_logging():
    """Sets up the logging options for a log with supplied name."""
    logging.setup(cfg.CONF, "inventory")
    LOG.debug("Logging enabled!")
    LOG.debug("%(prog)s version %(version)s",
              {'prog': sys.argv[0],
               'version': version_info.release_string()})
    LOG.debug("command line: %s", " ".join(sys.argv))
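A typical service bootstrap using these helpers might look like the following sketch (argv handling is illustrative):

    import sys

    from inventory.api import config as api_config

    api_config.init(sys.argv[1:])   # registers sysinv opts, parses CLI/config
    api_config.setup_logging()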
115
inventory/inventory/inventory/api/controllers/root.py
Normal file
@ -0,0 +1,115 @@
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from inventory.api.controllers import v1
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import link

ID_VERSION = 'v1'


def expose(*args, **kwargs):
    """Ensure that only JSON, and not XML, is supported."""
    if 'rest_content_types' not in kwargs:
        kwargs['rest_content_types'] = ('json',)
    return wsme_pecan.wsexpose(*args, **kwargs)


class Version(base.APIBase):
    """An API version representation.

    This class represents an API version, including the minimum and
    maximum minor versions that are supported within the major version.
    """

    id = wtypes.text
    """The ID of the (major) version, also acts as the release number"""

    links = [link.Link]
    """A Link that points to a specific version of the API"""

    @classmethod
    def convert(cls, vid):
        version = Version()
        version.id = vid
        version.links = [link.Link.make_link('self', pecan.request.host_url,
                                             vid, '', bookmark=True)]
        return version


class Root(base.APIBase):

    name = wtypes.text
    """The name of the API"""

    description = wtypes.text
    """Some information about this API"""

    versions = [Version]
    """Links to all the versions available in this API"""

    default_version = Version
    """A link to the default version of the API"""

    @staticmethod
    def convert():
        root = Root()
        root.name = "Inventory API"
        root.description = ("Inventory is an OpenStack project which "
                            "provides REST API services for "
                            "system configuration.")
        root.default_version = Version.convert(ID_VERSION)
        root.versions = [root.default_version]
        return root


class RootController(rest.RestController):

    _versions = [ID_VERSION]
    """All supported API versions"""

    _default_version = ID_VERSION
    """The default API version"""

    v1 = v1.Controller()

    @expose(Root)
    def get(self):
        # NOTE: The reason why convert() is being called for every
        #       request is because we need to get the host url from
        #       the request object to make the links.
        return Root.convert()

    @pecan.expose()
    def _route(self, args, request=None):
        """Overrides the default routing behavior.

        It redirects the request to the default version of the Inventory API
        if the version number is not specified in the URL.
        """

        if args[0] and args[0] not in self._versions:
            args = [self._default_version] + args
        return super(RootController, self)._route(args, request)
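Because _route() prepends the default version, a version-less URL resolves into the v1 tree; for example (illustrative paths):

    # GET /hosts    -> routed as GET /v1/hosts
    # GET /v1/hosts -> served directly by the v1 controller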
198
inventory/inventory/inventory/api/controllers/v1/__init__.py
Normal file
@ -0,0 +1,198 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


import pecan
from pecan import rest

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import cpu
from inventory.api.controllers.v1 import ethernet_port
from inventory.api.controllers.v1 import host
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import lldp_agent
from inventory.api.controllers.v1 import lldp_neighbour
from inventory.api.controllers.v1 import memory
from inventory.api.controllers.v1 import node
from inventory.api.controllers.v1 import pci_device
from inventory.api.controllers.v1 import port
from inventory.api.controllers.v1 import sensor
from inventory.api.controllers.v1 import sensorgroup

from inventory.api.controllers.v1 import system
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan


class MediaType(base.APIBase):
    """A media type representation."""

    base = wtypes.text
    type = wtypes.text

    def __init__(self, base, type):
        self.base = base
        self.type = type


class V1(base.APIBase):
    """The representation of the version 1 of the API."""

    id = wtypes.text
    "The ID of the version, also acts as the release number"

    media_types = [MediaType]
    "An array of supported media types for this version"

    links = [link.Link]
    "Links that point to a specific URL for this version and documentation"

    systems = [link.Link]
    "Links to the system resource"

    hosts = [link.Link]
    "Links to the host resource"

    nodes = [link.Link]
    "Links to the node resource"

    cpus = [link.Link]
    "Links to the cpu resource"

    memory = [link.Link]
    "Links to the memory resource"

    ports = [link.Link]
    "Links to the port resource"

    ethernet_ports = [link.Link]
    "Links to the ethernet_port resource"

    lldp_agents = [link.Link]
    "Links to the lldp agents resource"

    lldp_neighbours = [link.Link]
    "Links to the lldp neighbours resource"

    sensors = [link.Link]
    "Links to the sensor resource"

    sensorgroups = [link.Link]
    "Links to the sensorgroup resource"

    @classmethod
    def convert(cls):
        v1 = V1()
        v1.id = "v1"
        v1.links = [link.Link.make_link('self', pecan.request.host_url,
                                        'v1', '', bookmark=True),
                    link.Link.make_link('describedby',
                                        'http://www.starlingx.io/',
                                        'developer/inventory/dev',
                                        'api-spec-v1.html',
                                        bookmark=True, type='text/html')
                    ]
        v1.media_types = [MediaType(
            'application/json',
            'application/vnd.openstack.inventory.v1+json')]

        v1.systems = [link.Link.make_link('self', pecan.request.host_url,
                                          'systems', ''),
                      link.Link.make_link('bookmark',
                                          pecan.request.host_url,
                                          'systems', '',
                                          bookmark=True)
                      ]

        v1.hosts = [link.Link.make_link('self', pecan.request.host_url,
                                        'hosts', ''),
                    link.Link.make_link('bookmark',
                                        pecan.request.host_url,
                                        'hosts', '',
                                        bookmark=True)
                    ]

        v1.nodes = [link.Link.make_link('self', pecan.request.host_url,
                                        'nodes', ''),
                    link.Link.make_link('bookmark',
                                        pecan.request.host_url,
                                        'nodes', '',
                                        bookmark=True)
                    ]

        v1.cpus = [link.Link.make_link('self', pecan.request.host_url,
                                       'cpus', ''),
                   link.Link.make_link('bookmark',
                                       pecan.request.host_url,
                                       'cpus', '',
                                       bookmark=True)
                   ]

        v1.memory = [link.Link.make_link('self', pecan.request.host_url,
                                         'memory', ''),
                     link.Link.make_link('bookmark',
                                         pecan.request.host_url,
                                         'memory', '',
                                         bookmark=True)
                     ]

        v1.ports = [link.Link.make_link('self',
                                        pecan.request.host_url,
                                        'ports', ''),
                    link.Link.make_link('bookmark',
                                        pecan.request.host_url,
                                        'ports', '',
                                        bookmark=True)
                    ]

        v1.ethernet_ports = [link.Link.make_link('self',
                                                 pecan.request.host_url,
                                                 'ethernet_ports', ''),
                             link.Link.make_link('bookmark',
                                                 pecan.request.host_url,
                                                 'ethernet_ports', '',
                                                 bookmark=True)
                             ]

        v1.lldp_agents = [link.Link.make_link('self',
                                              pecan.request.host_url,
                                              'lldp_agents', ''),
                          link.Link.make_link('bookmark',
                                              pecan.request.host_url,
                                              'lldp_agents', '',
                                              bookmark=True)
                          ]

        v1.lldp_neighbours = [link.Link.make_link('self',
                                                  pecan.request.host_url,
                                                  'lldp_neighbours', ''),
                              link.Link.make_link('bookmark',
                                                  pecan.request.host_url,
                                                  'lldp_neighbours', '',
                                                  bookmark=True)
                              ]

        v1.sensors = [link.Link.make_link('self',
                                          pecan.request.host_url,
                                          'sensors', ''),
                      link.Link.make_link('bookmark',
                                          pecan.request.host_url,
                                          'sensors', '',
                                          bookmark=True)
                      ]

        v1.sensorgroups = [link.Link.make_link('self',
                                               pecan.request.host_url,
                                               'sensorgroups', ''),
                           link.Link.make_link('bookmark',
                                               pecan.request.host_url,
                                               'sensorgroups', '',
                                               bookmark=True)
                           ]

        return v1


class Controller(rest.RestController):
    """Version 1 API controller root."""

    systems = system.SystemController()
    hosts = host.HostController()
    nodes = node.NodeController()
    cpus = cpu.CPUController()
    memorys = memory.MemoryController()
    ports = port.PortController()
    ethernet_ports = ethernet_port.EthernetPortController()
    lldp_agents = lldp_agent.LLDPAgentController()
    lldp_neighbours = lldp_neighbour.LLDPNeighbourController()
    pci_devices = pci_device.PCIDeviceController()
    sensors = sensor.SensorController()
    sensorgroups = sensorgroup.SensorGroupController()

    @wsme_pecan.wsexpose(V1)
    def get(self):
        return V1.convert()


__all__ = ('Controller',)
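A hedged sketch of fetching the version document exposed above; the endpoint host and port are assumptions:

    import json

    from six.moves.urllib.request import urlopen

    resp = urlopen('http://127.0.0.1:18080/v1')   # host/port: assumptions
    doc = json.loads(resp.read().decode('utf-8'))
    print(doc['id'], [l['href'] for l in doc['links']])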
130
inventory/inventory/inventory/api/controllers/v1/base.py
Normal file
@ -0,0 +1,130 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import datetime
import functools
from oslo_utils._i18n import _
from webob import exc
import wsme
from wsme import types as wtypes


class APIBase(wtypes.Base):

    created_at = wsme.wsattr(datetime.datetime, readonly=True)
    """The time in UTC at which the object is created"""

    updated_at = wsme.wsattr(datetime.datetime, readonly=True)
    """The time in UTC at which the object is updated"""

    def as_dict(self):
        """Render this object as a dict of its fields."""
        return dict((k, getattr(self, k))
                    for k in self.fields
                    if hasattr(self, k) and
                    getattr(self, k) != wsme.Unset)

    def unset_fields_except(self, except_list=None):
        """Unset fields so they don't appear in the message body.

        :param except_list: A list of fields that won't be touched.

        """
        if except_list is None:
            except_list = []

        for k in self.as_dict():
            if k not in except_list:
                setattr(self, k, wsme.Unset)

    @classmethod
    def from_rpc_object(cls, m, fields=None):
        """Convert an RPC object to an API object."""
        obj_dict = m.as_dict()
        # Unset non-required fields so they do not appear
        # in the message body
        obj_dict.update(dict((k, wsme.Unset)
                        for k in obj_dict.keys()
                        if fields and k not in fields))
        return cls(**obj_dict)


@functools.total_ordering
class Version(object):
    """API Version object."""

    string = 'X-OpenStack-Inventory-API-Version'
    """HTTP Header string carrying the requested version"""

    min_string = 'X-OpenStack-Inventory-API-Minimum-Version'
    """HTTP response header"""

    max_string = 'X-OpenStack-Inventory-API-Maximum-Version'
    """HTTP response header"""

    def __init__(self, headers, default_version, latest_version):
        """Create an API Version object from the supplied headers.

        :param headers: webob headers
        :param default_version: version to use if not specified in headers
        :param latest_version: version to use if latest is requested
        :raises: webob.HTTPNotAcceptable
        """
        (self.major, self.minor) = Version.parse_headers(
            headers, default_version, latest_version)

    def __repr__(self):
        return '%s.%s' % (self.major, self.minor)

    @staticmethod
    def parse_headers(headers, default_version, latest_version):
        """Determine the API version requested based on the headers supplied.

        :param headers: webob headers
        :param default_version: version to use if not specified in headers
        :param latest_version: version to use if latest is requested
        :returns: a tuple of (major, minor) version numbers
        :raises: webob.HTTPNotAcceptable
        """
        version_str = headers.get(Version.string, default_version)

        if version_str.lower() == 'latest':
            parse_str = latest_version
        else:
            parse_str = version_str

        try:
            version = tuple(int(i) for i in parse_str.split('.'))
        except ValueError:
            version = ()

        if len(version) != 2:
            raise exc.HTTPNotAcceptable(_(
                "Invalid value for %s header") % Version.string)
        return version

    def __gt__(self, other):
        return (self.major, self.minor) > (other.major, other.minor)

    def __eq__(self, other):
        return (self.major, self.minor) == (other.major, other.minor)

    def __ne__(self, other):
        return not self.__eq__(other)
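For reference, a small sketch of how Version consumes request headers (a plain dict works here because only .get() is used):

    headers = {'X-OpenStack-Inventory-API-Version': '1.0'}
    v = Version(headers, default_version='1.0', latest_version='1.0')
    print(v.major, v.minor)   # -> 1 0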
@ -0,0 +1,58 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


import pecan
from wsme import types as wtypes

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import link


class Collection(base.APIBase):

    next = wtypes.text
    "A link to retrieve the next subset of the collection"

    @property
    def collection(self):
        return getattr(self, self._type)

    def has_next(self, limit):
        """Return whether collection has more items."""
        return len(self.collection) and len(self.collection) == limit

    def get_next(self, limit, url=None, **kwargs):
        """Return a link to the next subset of the collection."""
        if not self.has_next(limit):
            return wtypes.Unset

        resource_url = url or self._type
        q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs])
        next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
            'args': q_args, 'limit': limit,
            'marker': self.collection[-1].uuid}

        return link.Link.make_link('next', pecan.request.host_url,
                                   resource_url, next_args).href
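A resource collection plugs into this base by naming its item attribute, as in this hypothetical sketch (the real CPU collection in the next file follows the same pattern):

    class WidgetCollection(Collection):      # 'widget' is a hypothetical type
        widgets = [wtypes.text]              # item attribute named by _type

        def __init__(self, **kwargs):
            self._type = 'widgets'

get_next() then emits '?limit=N&marker=<last uuid>' links for paging.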
303
inventory/inventory/inventory/api/controllers/v1/cpu.py
Normal file
@ -0,0 +1,303 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2013 UnitedStack Inc.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
#
|
||||||
|
# Copyright (c) 2013-2018 Wind River Systems, Inc.
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
|
||||||
|
import six
|
||||||
|
|
||||||
|
import pecan
|
||||||
|
from pecan import rest
|
||||||
|
|
||||||
|
from wsme import types as wtypes
|
||||||
|
import wsmeext.pecan as wsme_pecan
|
||||||
|
|
||||||
|
from inventory.api.controllers.v1 import base
|
||||||
|
from inventory.api.controllers.v1 import collection
|
||||||
|
from inventory.api.controllers.v1 import link
|
||||||
|
from inventory.api.controllers.v1 import types
|
||||||
|
from inventory.api.controllers.v1 import utils
|
||||||
|
from inventory.common import exception
|
||||||
|
from inventory.common.i18n import _
|
||||||
|
from inventory import objects
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class CPUPatchType(types.JsonPatchType):
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def mandatory_attrs():
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
class CPU(base.APIBase):
|
||||||
|
"""API representation of a host CPU.
|
||||||
|
|
||||||
|
This class enforces type checking and value constraints, and converts
|
||||||
|
between the internal object model and the API representation of a cpu.
|
||||||
|
"""
|
||||||
|
|
||||||
|
uuid = types.uuid
|
||||||
|
"Unique UUID for this cpu"
|
||||||
|
|
||||||
|
cpu = int
|
||||||
|
"Represent the cpu id cpu"
|
||||||
|
|
||||||
|
core = int
|
||||||
|
"Represent the core id cpu"
|
||||||
|
|
||||||
|
thread = int
|
||||||
|
"Represent the thread id cpu"
|
||||||
|
|
||||||
|
cpu_family = wtypes.text
|
||||||
|
"Represent the cpu family of the cpu"
|
||||||
|
|
||||||
|
cpu_model = wtypes.text
|
||||||
|
"Represent the cpu model of the cpu"
|
||||||
|
|
||||||
|
function = wtypes.text
|
||||||
|
"Represent the function of the cpu"
|
||||||
|
|
||||||
|
num_cores_on_processor0 = wtypes.text
|
||||||
|
"The number of cores on processors 0"
|
||||||
|
|
||||||
|
num_cores_on_processor1 = wtypes.text
|
||||||
|
"The number of cores on processors 1"
|
||||||
|
|
||||||
|
num_cores_on_processor2 = wtypes.text
|
||||||
|
"The number of cores on processors 2"
|
||||||
|
|
||||||
|
num_cores_on_processor3 = wtypes.text
|
||||||
|
"The number of cores on processors 3"
|
||||||
|
|
||||||
|
numa_node = int
|
||||||
|
"The numa node or zone the cpu. API only attribute"
|
||||||
|
|
||||||
|
capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
|
||||||
|
six.integer_types)}
|
||||||
|
"This cpu's meta data"
|
||||||
|
|
||||||
|
host_id = int
|
||||||
|
"The hostid that this cpu belongs to"
|
||||||
|
|
||||||
|
node_id = int
|
||||||
|
"The nodeId that this cpu belongs to"
|
||||||
|
|
||||||
|
host_uuid = types.uuid
|
||||||
|
"The UUID of the host this cpu belongs to"
|
||||||
|
|
||||||
|
node_uuid = types.uuid
|
||||||
|
"The UUID of the node this cpu belongs to"
|
||||||
|
|
||||||
|
links = [link.Link]
|
||||||
|
"A list containing a self link and associated cpu links"
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
self.fields = objects.CPU.fields.keys()
|
||||||
|
for k in self.fields:
|
||||||
|
setattr(self, k, kwargs.get(k))
|
||||||
|
|
||||||
|
# API only attributes
|
||||||
|
self.fields.append('function')
|
||||||
|
setattr(self, 'function', kwargs.get('function', None))
|
||||||
|
self.fields.append('num_cores_on_processor0')
|
||||||
|
setattr(self, 'num_cores_on_processor0',
|
||||||
|
kwargs.get('num_cores_on_processor0', None))
|
||||||
|
self.fields.append('num_cores_on_processor1')
|
||||||
|
setattr(self, 'num_cores_on_processor1',
|
||||||
|
kwargs.get('num_cores_on_processor1', None))
|
||||||
|
self.fields.append('num_cores_on_processor2')
|
||||||
|
setattr(self, 'num_cores_on_processor2',
|
||||||
|
kwargs.get('num_cores_on_processor2', None))
|
||||||
|
self.fields.append('num_cores_on_processor3')
|
||||||
|
setattr(self, 'num_cores_on_processor3',
|
||||||
|
kwargs.get('num_cores_on_processor3', None))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def convert_with_links(cls, rpc_port, expand=True):
|
||||||
|
cpu = CPU(**rpc_port.as_dict())
|
||||||
|
if not expand:
|
||||||
|
cpu.unset_fields_except(
|
||||||
|
['uuid', 'cpu', 'core', 'thread',
|
||||||
|
                                     'cpu_family', 'cpu_model',
                                     'numa_node', 'host_uuid', 'node_uuid',
                                     'host_id', 'node_id',
                                     'capabilities',
                                     'created_at', 'updated_at'])

        # never expose the id attribute
        cpu.host_id = wtypes.Unset
        cpu.node_id = wtypes.Unset

        cpu.links = [link.Link.make_link('self', pecan.request.host_url,
                                         'cpus', cpu.uuid),
                     link.Link.make_link('bookmark',
                                         pecan.request.host_url,
                                         'cpus', cpu.uuid,
                                         bookmark=True)
                     ]
        return cpu


class CPUCollection(collection.Collection):
    """API representation of a collection of cpus."""

    cpus = [CPU]
    "A list containing cpu objects"

    def __init__(self, **kwargs):
        self._type = 'cpus'

    @classmethod
    def convert_with_links(cls, rpc_ports, limit, url=None,
                           expand=False, **kwargs):
        collection = CPUCollection()
        collection.cpus = [
            CPU.convert_with_links(p, expand) for p in rpc_ports]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection


class CPUController(rest.RestController):
    """REST controller for cpus."""

    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self, from_hosts=False, from_node=False):
        self._from_hosts = from_hosts
        self._from_node = from_node

    def _get_cpus_collection(self, i_uuid, node_uuid, marker,
                             limit, sort_key, sort_dir,
                             expand=False, resource_url=None):

        if self._from_hosts and not i_uuid:
            raise exception.InvalidParameterValue(_(
                "Host id not specified."))

        if self._from_node and not i_uuid:
            raise exception.InvalidParameterValue(_(
                "Node id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.CPU.get_by_uuid(pecan.request.context,
                                                 marker)

        if self._from_hosts:
            # cpus = pecan.request.dbapi.cpu_get_by_host(
            cpus = objects.CPU.get_by_host(
                pecan.request.context,
                i_uuid, limit,
                marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
        elif self._from_node:
            # cpus = pecan.request.dbapi.cpu_get_by_node(
            cpus = objects.CPU.get_by_node(
                pecan.request.context,
                i_uuid, limit,
                marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
        else:
            if i_uuid and not node_uuid:
                # cpus = pecan.request.dbapi.cpu_get_by_host(
                cpus = objects.CPU.get_by_host(
                    pecan.request.context,
                    i_uuid, limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)
            elif i_uuid and node_uuid:
                # cpus = pecan.request.dbapi.cpu_get_by_host_node(
                cpus = objects.CPU.get_by_host_node(
                    pecan.request.context,
                    i_uuid,
                    node_uuid,
                    limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)

            elif node_uuid:
                # cpus = pecan.request.dbapi.cpu_get_by_host_node(
                cpus = objects.CPU.get_by_node(
                    pecan.request.context,
                    node_uuid,
                    limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)

            else:
                # cpus = pecan.request.dbapi.icpu_get_list(
                cpus = objects.CPU.list(
                    pecan.request.context,
                    limit, marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)

        return CPUCollection.convert_with_links(cpus, limit,
                                                url=resource_url,
                                                expand=expand,
                                                sort_key=sort_key,
                                                sort_dir=sort_dir)

    @wsme_pecan.wsexpose(CPUCollection, types.uuid, types.uuid,
                         types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, host_uuid=None, node_uuid=None,
                marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of cpus."""
        return self._get_cpus_collection(host_uuid, node_uuid,
                                         marker, limit,
                                         sort_key, sort_dir)

    @wsme_pecan.wsexpose(CPUCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, host_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of cpus with detail."""
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "cpus":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['cpus', 'detail'])
        return self._get_cpus_collection(host_uuid, None, marker, limit,
                                         sort_key, sort_dir, expand,
                                         resource_url)

    @wsme_pecan.wsexpose(CPU, types.uuid)
    def get_one(self, cpu_uuid):
        """Retrieve information about the given cpu."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        rpc_port = objects.CPU.get_by_uuid(pecan.request.context, cpu_uuid)
        return CPU.convert_with_links(rpc_port)
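For illustration only (not part of the commit): a minimal sketch of querying the paginated cpus collection defined above over raw REST. The port 18002 endpoint and the keystone token are assumptions/placeholders, not values fixed by this diff.

import requests

INVENTORY_URL = 'http://localhost:18002/v1'      # assumed endpoint
HEADERS = {'X-Auth-Token': '<keystone-token>'}   # placeholder token

# First page of cpus, two at a time, matching the controller defaults.
resp = requests.get(INVENTORY_URL + '/cpus',
                    params={'limit': 2, 'sort_key': 'id', 'sort_dir': 'asc'},
                    headers=HEADERS)
page = resp.json()
for cpu in page['cpus']:
    print(cpu['uuid'], cpu.get('allocated_function'))
# 'next' carries the marker-based link built by collection.get_next().
print(page.get('next'))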
330
inventory/inventory/inventory/api/controllers/v1/cpu_utils.py
Normal file
@ -0,0 +1,330 @@
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import pecan

from inventory.common import constants
from inventory.common import k_host
from oslo_log import log

LOG = log.getLogger(__name__)

CORE_FUNCTIONS = [
    constants.PLATFORM_FUNCTION,
    constants.VSWITCH_FUNCTION,
    constants.SHARED_FUNCTION,
    constants.VM_FUNCTION,
    constants.NO_FUNCTION
]

VSWITCH_MIN_CORES = 1
VSWITCH_MAX_CORES = 8


class CpuProfile(object):
    class CpuConfigure(object):
        def __init__(self):
            self.platform = 0
            self.vswitch = 0
            self.shared = 0
            self.vms = 0
            self.numa_node = 0

    # cpus is a list of cpu sorted by numa_node, core and thread
    # if not, provide a node list sorted by numa_node
    # (id might not be reliable)
    def __init__(self, cpus, nodes=None):
        if nodes is not None:
            cpus = CpuProfile.sort_cpu_by_numa_node(cpus, nodes)
        cores = []

        self.number_of_cpu = 0
        self.cores_per_cpu = 0
        self.hyper_thread = False
        self.processors = []
        cur_processor = None

        for cpu in cpus:
            key = '{0}-{1}'.format(cpu.numa_node, cpu.core)
            if key not in cores:
                cores.append(key)
            else:
                self.hyper_thread = True
                continue

            if (cur_processor is None or
                    cur_processor.numa_node != cpu.numa_node):
                cur_processor = CpuProfile.CpuConfigure()
                cur_processor.numa_node = cpu.numa_node
                self.processors.append(cur_processor)

            if cpu.allocated_function == constants.PLATFORM_FUNCTION:
                cur_processor.platform += 1
            elif cpu.allocated_function == constants.VSWITCH_FUNCTION:
                cur_processor.vswitch += 1
            elif cpu.allocated_function == constants.SHARED_FUNCTION:
                cur_processor.shared += 1
            elif cpu.allocated_function == constants.VM_FUNCTION:
                cur_processor.vms += 1

        self.number_of_cpu = len(self.processors)
        self.cores_per_cpu = len(cores) / self.number_of_cpu

    @staticmethod
    def sort_cpu_by_numa_node(cpus, nodes):
        newlist = []
        for node in nodes:
            for cpu in cpus:
                if cpu.node_id == node.id:
                    cpu.numa_node = node.numa_node
                    newlist.append(cpu)
        return newlist
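For illustration only (not part of the commit): a minimal sketch of how CpuProfile digests a sorted cpu list, using a hypothetical namedtuple in place of the cpu database objects; the attribute names match what the constructor reads.

from collections import namedtuple

FakeCpu = namedtuple('FakeCpu', 'numa_node core thread allocated_function')

cpus = [
    FakeCpu(0, 0, 0, None),
    FakeCpu(0, 0, 1, None),   # second thread of core 0 -> hyper_thread
    FakeCpu(0, 1, 0, None),
    FakeCpu(1, 0, 0, None),
]
profile = CpuProfile(cpus)    # input already sorted by (numa_node, core, thread)
print(profile.number_of_cpu)  # 2: one CpuConfigure per numa node
print(profile.hyper_thread)   # True: the '0-0' core key was seen twice
print(profile.cores_per_cpu)  # 3 unique cores / 2 processors -> 1 under py27
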
class HostCpuProfile(CpuProfile):
    def __init__(self, subfunctions, cpus, nodes=None):
        super(HostCpuProfile, self).__init__(cpus, nodes)
        self.subfunctions = subfunctions

    # see if a cpu profile is applicable to this host
    def profile_applicable(self, profile):
        if self.number_of_cpu == profile.number_of_cpu and \
                self.cores_per_cpu == profile.cores_per_cpu:
            return self.check_profile_core_functions(profile)
        return False  # Profile is not applicable to host

    def check_profile_core_functions(self, profile):
        platform_cores = 0
        vswitch_cores = 0
        shared_cores = 0
        vm_cores = 0
        for cpu in profile.processors:
            platform_cores += cpu.platform
            vswitch_cores += cpu.vswitch
            shared_cores += cpu.shared
            vm_cores += cpu.vms

        error_string = ""
        if platform_cores == 0:
            error_string = "There must be at least one core for %s." % \
                           constants.PLATFORM_FUNCTION
        elif k_host.COMPUTE in self.subfunctions and vswitch_cores == 0:
            error_string = "There must be at least one core for %s." % \
                           constants.VSWITCH_FUNCTION
        elif k_host.COMPUTE in self.subfunctions and vm_cores == 0:
            error_string = "There must be at least one core for %s." % \
                           constants.VM_FUNCTION
        return error_string


def lookup_function(s):
    for f in CORE_FUNCTIONS:
        if s.lower() == f.lower():
            return f
    return s


def check_profile_core_functions(personality, profile):

    platform_cores = 0
    vswitch_cores = 0
    shared_cores = 0
    vm_cores = 0
    for cpu in profile.processors:
        platform_cores += cpu.platform
        vswitch_cores += cpu.vswitch
        shared_cores += cpu.shared
        vm_cores += cpu.vms

    error_string = ""
    if platform_cores == 0:
        error_string = "There must be at least one core for %s." % \
                       constants.PLATFORM_FUNCTION
    elif k_host.COMPUTE in personality and vswitch_cores == 0:
        error_string = "There must be at least one core for %s." % \
                       constants.VSWITCH_FUNCTION
    elif k_host.COMPUTE in personality and vm_cores == 0:
        error_string = "There must be at least one core for %s." % \
                       constants.VM_FUNCTION
    return error_string


def check_core_functions(personality, icpus):
    platform_cores = 0
    vswitch_cores = 0
    shared_cores = 0
    vm_cores = 0
    for cpu in icpus:
        allocated_function = cpu.allocated_function
        if allocated_function == constants.PLATFORM_FUNCTION:
            platform_cores += 1
        elif allocated_function == constants.VSWITCH_FUNCTION:
            vswitch_cores += 1
        elif allocated_function == constants.SHARED_FUNCTION:
            shared_cores += 1
        elif allocated_function == constants.VM_FUNCTION:
            vm_cores += 1

    error_string = ""
    if platform_cores == 0:
        error_string = "There must be at least one core for %s." % \
                       constants.PLATFORM_FUNCTION
    elif k_host.COMPUTE in personality and vswitch_cores == 0:
        error_string = "There must be at least one core for %s." % \
                       constants.VSWITCH_FUNCTION
    elif k_host.COMPUTE in personality and vm_cores == 0:
        error_string = "There must be at least one core for %s." % \
                       constants.VM_FUNCTION
    return error_string


def get_default_function(host):
    """Return the default function to be assigned to cpus on this host"""
    if k_host.COMPUTE in host.subfunctions:
        return constants.VM_FUNCTION
    return constants.PLATFORM_FUNCTION


def get_cpu_function(host, cpu):
    """Return the function that is assigned to the specified cpu"""
    for s in range(0, len(host.nodes)):
        functions = host.cpu_functions[s]
        for f in CORE_FUNCTIONS:
            if cpu.cpu in functions[f]:
                return f
    return constants.NO_FUNCTION


def get_cpu_counts(host):
    """Return the CPU counts for this host by socket and function."""
    counts = {}
    for s in range(0, len(host.nodes)):
        counts[s] = {}
        for f in CORE_FUNCTIONS:
            counts[s][f] = len(host.cpu_functions[s][f])
    return counts


def init_cpu_counts(host):
    """Create empty data structures to track CPU assignments by socket and
    function.
    """
    host.cpu_functions = {}
    host.cpu_lists = {}
    for s in range(0, len(host.nodes)):
        host.cpu_functions[s] = {}
        for f in CORE_FUNCTIONS:
            host.cpu_functions[s][f] = []
        host.cpu_lists[s] = []


def _sort_by_coreid(cpu):
    """Sort a list of cpu database objects such that threads of the same core
    are adjacent in the list with the lowest thread number appearing first.
    """
    return (int(cpu.core), int(cpu.thread))


def restructure_host_cpu_data(host):
    """Reorganize the cpu list by socket and function so that it can more
    easily be consumed by other utilities.
    """
    init_cpu_counts(host)
    host.sockets = len(host.nodes or [])
    host.hyperthreading = False
    host.physical_cores = 0
    if not host.cpus:
        return
    host.cpu_model = host.cpus[0].cpu_model
    cpu_list = sorted(host.cpus, key=_sort_by_coreid)
    for cpu in cpu_list:
        inode = pecan.request.dbapi.inode_get(inode_id=cpu.node_id)
        cpu.numa_node = inode.numa_node
        if cpu.thread == 0:
            host.physical_cores += 1
        elif cpu.thread > 0:
            host.hyperthreading = True
        function = cpu.allocated_function or get_default_function(host)
        host.cpu_functions[cpu.numa_node][function].append(int(cpu.cpu))
        host.cpu_lists[cpu.numa_node].append(int(cpu.cpu))


def check_core_allocations(host, cpu_counts, func):
    """Check that minimum and maximum core values are respected."""
    total_platform_cores = 0
    total_vswitch_cores = 0
    total_shared_cores = 0
    for s in range(0, len(host.nodes)):
        available_cores = len(host.cpu_lists[s])
        platform_cores = cpu_counts[s][constants.PLATFORM_FUNCTION]
        vswitch_cores = cpu_counts[s][constants.VSWITCH_FUNCTION]
        shared_cores = cpu_counts[s][constants.SHARED_FUNCTION]
        requested_cores = platform_cores + vswitch_cores + shared_cores
        if requested_cores > available_cores:
            return ("More total logical cores requested than present on "
                    "'Processor %s' (%s cores)." % (s, available_cores))
        total_platform_cores += platform_cores
        total_vswitch_cores += vswitch_cores
        total_shared_cores += shared_cores
    if func.lower() == constants.PLATFORM_FUNCTION.lower():
        if ((k_host.CONTROLLER in host.subfunctions) and
                (k_host.COMPUTE in host.subfunctions)):
            if total_platform_cores < 2:
                return "%s must have at least two cores." % \
                       constants.PLATFORM_FUNCTION
        elif total_platform_cores == 0:
            return "%s must have at least one core." % \
                   constants.PLATFORM_FUNCTION
    if k_host.COMPUTE in (host.subfunctions or host.personality):
        if func.lower() == constants.VSWITCH_FUNCTION.lower():
            if host.hyperthreading:
                total_physical_cores = total_vswitch_cores / 2
            else:
                total_physical_cores = total_vswitch_cores
            if total_physical_cores < VSWITCH_MIN_CORES:
                return ("The %s function must have at least %s core(s)." %
                        (constants.VSWITCH_FUNCTION.lower(),
                         VSWITCH_MIN_CORES))
            elif total_physical_cores > VSWITCH_MAX_CORES:
                return ("The %s function can only be assigned up to %s cores."
                        % (constants.VSWITCH_FUNCTION.lower(),
                           VSWITCH_MAX_CORES))
        reserved_for_vms = \
            len(host.cpus) - total_platform_cores - total_vswitch_cores
        if reserved_for_vms <= 0:
            return "There must be at least one unused core for %s." % \
                   constants.VM_FUNCTION
    else:
        if total_platform_cores != len(host.cpus):
            return "All logical cores must be reserved for platform use."
    return ""


def update_core_allocations(host, cpu_counts):
    """Update the per socket/function cpu list based on the newly requested
    counts.
    """
    # Remove any previous assignments
    for s in range(0, len(host.nodes)):
        for f in CORE_FUNCTIONS:
            host.cpu_functions[s][f] = []
    # Set new assignments
    for s in range(0, len(host.nodes)):
        cpu_list = host.cpu_lists[s] if s in host.cpu_lists else []
        # Reserve for the platform first
        for i in range(0, cpu_counts[s][constants.PLATFORM_FUNCTION]):
            host.cpu_functions[s][constants.PLATFORM_FUNCTION].append(
                cpu_list.pop(0))
        # Reserve for the vswitch next
        for i in range(0, cpu_counts[s][constants.VSWITCH_FUNCTION]):
            host.cpu_functions[s][constants.VSWITCH_FUNCTION].append(
                cpu_list.pop(0))
        # Reserve for the shared next
        for i in range(0, cpu_counts[s][constants.SHARED_FUNCTION]):
            host.cpu_functions[s][constants.SHARED_FUNCTION].append(
                cpu_list.pop(0))
        # Assign the remaining cpus to the default function for this host
        host.cpu_functions[s][get_default_function(host)] += cpu_list
    return
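Again for illustration (not part of the commit): a sketch of the allocation ordering in update_core_allocations(), run against a hypothetical one-socket host stub; the 'controller' subfunctions value is an assumption chosen so the default function is platform.

# Hypothetical host object; only the attributes the helpers above touch
# are stubbed in.
class FakeHost(object):
    pass

host = FakeHost()
host.nodes = [object()]            # one socket
host.subfunctions = 'controller'   # assumed value; no compute subfunction
init_cpu_counts(host)              # builds empty cpu_functions/cpu_lists
host.cpu_lists[0] = [0, 1, 2, 3]   # four logical cpus on socket 0

cpu_counts = {0: dict((f, 0) for f in CORE_FUNCTIONS)}
cpu_counts[0][constants.PLATFORM_FUNCTION] = 2
update_core_allocations(host, cpu_counts)
# cpus 0 and 1 are popped for platform; the leftover [2, 3] fall through to
# get_default_function(host), which is platform again on a non-compute host.
print(host.cpu_functions[0][constants.PLATFORM_FUNCTION])   # [0, 1, 2, 3]
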
310
inventory/inventory/inventory/api/controllers/v1/ethernet_port.py
Normal file
@ -0,0 +1,310 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import six

import pecan
from pecan import rest

import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory import objects

from oslo_log import log

LOG = log.getLogger(__name__)


class EthernetPortPatchType(types.JsonPatchType):
    @staticmethod
    def mandatory_attrs():
        return []


class EthernetPort(base.APIBase):
    """API representation of an Ethernet port

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of an
    Ethernet port.
    """

    uuid = types.uuid
    "Unique UUID for this port"

    type = wtypes.text
    "Represent the type of port"

    name = wtypes.text
    "Represent the name of the port. Unique per host"

    namedisplay = wtypes.text
    "Represent the display name of the port. Unique per host"

    pciaddr = wtypes.text
    "Represent the pci address of the port"

    dev_id = int
    "The unique identifier of PCI device"

    pclass = wtypes.text
    "Represent the pci class of the port"

    pvendor = wtypes.text
    "Represent the pci vendor of the port"

    pdevice = wtypes.text
    "Represent the pci device of the port"

    psvendor = wtypes.text
    "Represent the pci svendor of the port"

    psdevice = wtypes.text
    "Represent the pci sdevice of the port"

    numa_node = int
    "Represent the numa node or zone of the port"

    sriov_totalvfs = int
    "The total number of available SR-IOV VFs"

    sriov_numvfs = int
    "The number of configured SR-IOV VFs"

    sriov_vfs_pci_address = wtypes.text
    "The PCI Addresses of the VFs"

    driver = wtypes.text
    "The kernel driver for this device"

    mac = wsme.wsattr(types.macaddress, mandatory=False)
    "Represent the MAC Address of the port"

    mtu = int
    "Represent the MTU size (bytes) of the port"

    speed = int
    "Represent the speed (MBytes/sec) of the port"

    link_mode = int
    "Represent the link mode of the port"

    duplex = wtypes.text
    "Represent the duplex mode of the port"

    autoneg = wtypes.text
    "Represent the auto-negotiation mode of the port"

    bootp = wtypes.text
    "Represent the bootp port of the host"

    capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
                                                  six.integer_types)}
    "Represent meta data of the port"

    host_id = int
    "Represent the host_id the port belongs to"

    bootif = wtypes.text
    "Represent whether the port is a boot port"

    dpdksupport = bool
    "Represent whether or not the port supports DPDK acceleration"

    host_uuid = types.uuid
    "Represent the UUID of the host the port belongs to"

    node_uuid = types.uuid
    "Represent the UUID of the node the port belongs to"

    links = [link.Link]
    "Represent a list containing a self link and associated port links"

    def __init__(self, **kwargs):
        self.fields = objects.EthernetPort.fields.keys()
        for k in self.fields:
            setattr(self, k, kwargs.get(k))

    @classmethod
    def convert_with_links(cls, rpc_port, expand=True):
        port = EthernetPort(**rpc_port.as_dict())
        if not expand:
            port.unset_fields_except(['uuid', 'host_id', 'node_id',
                                      'type', 'name',
                                      'namedisplay', 'pciaddr', 'dev_id',
                                      'pclass', 'pvendor', 'pdevice',
                                      'psvendor', 'psdevice', 'numa_node',
                                      'mac', 'sriov_totalvfs', 'sriov_numvfs',
                                      'sriov_vfs_pci_address', 'driver',
                                      'mtu', 'speed', 'link_mode',
                                      'duplex', 'autoneg', 'bootp',
                                      'capabilities',
                                      'host_uuid',
                                      'node_uuid', 'dpdksupport',
                                      'created_at', 'updated_at'])

        # never expose the id attribute
        port.host_id = wtypes.Unset
        port.node_id = wtypes.Unset

        port.links = [link.Link.make_link('self', pecan.request.host_url,
                                          'ethernet_ports', port.uuid),
                      link.Link.make_link('bookmark',
                                          pecan.request.host_url,
                                          'ethernet_ports', port.uuid,
                                          bookmark=True)
                      ]
        return port


class EthernetPortCollection(collection.Collection):
    """API representation of a collection of EthernetPort objects."""

    ethernet_ports = [EthernetPort]
    "A list containing EthernetPort objects"

    def __init__(self, **kwargs):
        self._type = 'ethernet_ports'

    @classmethod
    def convert_with_links(cls, rpc_ports, limit, url=None,
                           expand=False, **kwargs):
        collection = EthernetPortCollection()
        collection.ethernet_ports = [EthernetPort.convert_with_links(p, expand)
                                     for p in rpc_ports]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection


LOCK_NAME = 'EthernetPortController'


class EthernetPortController(rest.RestController):
    """REST controller for EthernetPorts."""

    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self, from_hosts=False, from_node=False):
        self._from_hosts = from_hosts
        self._from_node = from_node

    def _get_ports_collection(self, uuid, node_uuid,
                              marker, limit, sort_key, sort_dir,
                              expand=False, resource_url=None):

        if self._from_hosts and not uuid:
            raise exception.InvalidParameterValue(_(
                "Host id not specified."))

        if self._from_node and not uuid:
            raise exception.InvalidParameterValue(_(
                "Node id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.EthernetPort.get_by_uuid(
                pecan.request.context,
                marker)

        if self._from_hosts:
            ports = objects.EthernetPort.get_by_host(
                pecan.request.context,
                uuid, limit,
                marker=marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
        elif self._from_node:
            ports = objects.EthernetPort.get_by_numa_node(
                pecan.request.context,
                uuid, limit,
                marker=marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
        else:
            if uuid:
                ports = objects.EthernetPort.get_by_host(
                    pecan.request.context,
                    uuid, limit,
                    marker=marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)
            else:
                ports = objects.EthernetPort.list(
                    pecan.request.context,
                    limit, marker=marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)

        return EthernetPortCollection.convert_with_links(
            ports, limit, url=resource_url,
            expand=expand,
            sort_key=sort_key,
            sort_dir=sort_dir)

    @wsme_pecan.wsexpose(EthernetPortCollection, types.uuid, types.uuid,
                         types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, uuid=None, node_uuid=None,
                marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of ports."""

        return self._get_ports_collection(uuid,
                                          node_uuid,
                                          marker, limit, sort_key, sort_dir)

    @wsme_pecan.wsexpose(EthernetPortCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of ports with detail."""

        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "ethernet_ports":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['ethernet_ports', 'detail'])
        return self._get_ports_collection(uuid, None, marker, limit,
                                          sort_key, sort_dir, expand,
                                          resource_url)

    @wsme_pecan.wsexpose(EthernetPort, types.uuid)
    def get_one(self, port_uuid):
        """Retrieve information about the given port."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        rpc_port = objects.EthernetPort.get_by_uuid(
            pecan.request.context, port_uuid)
        return EthernetPort.convert_with_links(rpc_port)
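For illustration (same endpoint/token assumptions as the cpus sketch): the detail route returns fully expanded port objects, since the controller builds the collection with expand=True.

import requests

resp = requests.get('http://localhost:18002/v1/ethernet_ports/detail',
                    headers={'X-Auth-Token': '<keystone-token>'},
                    params={'sort_key': 'name'})
for port in resp.json()['ethernet_ports']:
    # expand=True means no fields were stripped by unset_fields_except()
    print(port['name'], port['mac'], port['numa_node'])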
3585
inventory/inventory/inventory/api/controllers/v1/host.py
Normal file
File diff suppressed because it is too large
58
inventory/inventory/inventory/api/controllers/v1/link.py
Normal file
@ -0,0 +1,58 @@
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pecan
from wsme import types as wtypes

from inventory.api.controllers.v1 import base


def build_url(resource, resource_args, bookmark=False, base_url=None):
    if base_url is None:
        base_url = pecan.request.public_url

    template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s'
    # FIXME(lucasagomes): I'm getting a 404 when doing a GET on
    # a nested resource that the URL ends with a '/'.
    # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs
    template += '%(args)s' if resource_args.startswith('?') else '/%(args)s'
    return template % {'url': base_url, 'res': resource, 'args': resource_args}


class Link(base.APIBase):
    """A link representation."""

    href = wtypes.text
    """The url of a link."""

    rel = wtypes.text
    """The name of a link."""

    type = wtypes.text
    """Indicates the type of document/link."""

    @staticmethod
    def make_link(rel_name, url, resource, resource_args,
                  bookmark=False, type=wtypes.Unset):
        href = build_url(resource, resource_args,
                         bookmark=bookmark, base_url=url)
        return Link(href=href, rel=rel_name, type=type)

    @classmethod
    def sample(cls):
        sample = cls(href="http://localhost:18002/"
                          "eeaca217-e7d8-47b4-bb41-3f99f20ead81",
                     rel="bookmark")
        return sample
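For illustration (not part of the commit): what build_url produces once this module is importable; the base URL below is just an assumed example.

from inventory.api.controllers.v1.link import build_url

base = 'http://localhost:18002'
print(build_url('cpus', 'some-uuid', base_url=base))
# http://localhost:18002/v1/cpus/some-uuid
print(build_url('cpus', 'some-uuid', bookmark=True, base_url=base))
# http://localhost:18002/cpus/some-uuid   (bookmark links drop the version)
print(build_url('cpus', '?limit=5', base_url=base))
# http://localhost:18002/v1/cpus?limit=5  (query args attach without a slash)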
366
inventory/inventory/inventory/api/controllers/v1/lldp_agent.py
Normal file
@ -0,0 +1,366 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2016 Wind River Systems, Inc.
#


import jsonpatch

import pecan
from pecan import rest

import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import lldp_tlv
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import k_lldp
from inventory.common import utils as cutils
from inventory import objects

from oslo_log import log

LOG = log.getLogger(__name__)


class LLDPAgentPatchType(types.JsonPatchType):

    @staticmethod
    def mandatory_attrs():
        return []


class LLDPAgent(base.APIBase):
    """API representation of an LLDP Agent

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of an
    LLDP agent.
    """

    uuid = types.uuid
    "Unique UUID for this lldp agent"

    status = wtypes.text
    "Represent the status of the lldp agent"

    host_id = int
    "Represent the host_id the lldp agent belongs to"

    port_id = int
    "Represent the port_id the lldp agent belongs to"

    host_uuid = types.uuid
    "Represent the UUID of the host the lldp agent belongs to"

    port_uuid = types.uuid
    "Represent the UUID of the port the lldp agent belongs to"

    port_name = wtypes.text
    "Represent the name of the port the lldp agent belongs to"

    port_namedisplay = wtypes.text
    "Represent the display name of the port. Unique per host"

    links = [link.Link]
    "Represent a list containing a self link and associated lldp agent links"

    tlvs = [link.Link]
    "Links to the collection of LldpTlvs on this lldp agent"

    chassis_id = wtypes.text
    "Represent the chassis id of the lldp agent"

    port_identifier = wtypes.text
    "Represent the LLDP port id of the lldp agent"

    port_description = wtypes.text
    "Represent the port description of the lldp agent"

    system_description = wtypes.text
    "Represent the system description of the lldp agent"

    system_name = wtypes.text
    "Represent the system name of the lldp agent"

    system_capabilities = wtypes.text
    "Represent the system capabilities of the lldp agent"

    management_address = wtypes.text
    "Represent the management address of the lldp agent"

    ttl = wtypes.text
    "Represent the time-to-live of the lldp agent"

    dot1_lag = wtypes.text
    "Represent the 802.1 link aggregation status of the lldp agent"

    dot1_vlan_names = wtypes.text
    "Represent the 802.1 vlan names of the lldp agent"

    dot3_mac_status = wtypes.text
    "Represent the 802.3 MAC/PHY status of the lldp agent"

    dot3_max_frame = wtypes.text
    "Represent the 802.3 maximum frame size of the lldp agent"

    def __init__(self, **kwargs):
        self.fields = objects.LLDPAgent.fields.keys()
        for k in self.fields:
            setattr(self, k, kwargs.get(k))

    @classmethod
    def convert_with_links(cls, rpc_lldp_agent, expand=True):
        lldp_agent = LLDPAgent(**rpc_lldp_agent.as_dict())
        if not expand:
            lldp_agent.unset_fields_except([
                'uuid', 'host_id', 'port_id', 'status', 'host_uuid',
                'port_uuid', 'port_name', 'port_namedisplay',
                'created_at', 'updated_at',
                k_lldp.LLDP_TLV_TYPE_CHASSIS_ID,
                k_lldp.LLDP_TLV_TYPE_PORT_ID,
                k_lldp.LLDP_TLV_TYPE_TTL,
                k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME,
                k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC,
                k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP,
                k_lldp.LLDP_TLV_TYPE_MGMT_ADDR,
                k_lldp.LLDP_TLV_TYPE_PORT_DESC,
                k_lldp.LLDP_TLV_TYPE_DOT1_LAG,
                k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES,
                k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS,
                k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME])

        # never expose the id attribute
        lldp_agent.host_id = wtypes.Unset
        lldp_agent.port_id = wtypes.Unset

        lldp_agent.links = [
            link.Link.make_link('self', pecan.request.host_url,
                                'lldp_agents', lldp_agent.uuid),
            link.Link.make_link('bookmark', pecan.request.host_url,
                                'lldp_agents', lldp_agent.uuid,
                                bookmark=True)]

        if expand:
            lldp_agent.tlvs = [
                link.Link.make_link('self',
                                    pecan.request.host_url,
                                    'lldp_agents',
                                    lldp_agent.uuid + "/tlvs"),
                link.Link.make_link('bookmark',
                                    pecan.request.host_url,
                                    'lldp_agents',
                                    lldp_agent.uuid + "/tlvs",
                                    bookmark=True)]

        return lldp_agent


class LLDPAgentCollection(collection.Collection):
    """API representation of a collection of LldpAgent objects."""

    lldp_agents = [LLDPAgent]
    "A list containing LldpAgent objects"

    def __init__(self, **kwargs):
        self._type = 'lldp_agents'

    @classmethod
    def convert_with_links(cls, rpc_lldp_agents, limit, url=None,
                           expand=False, **kwargs):
        collection = LLDPAgentCollection()
        collection.lldp_agents = [LLDPAgent.convert_with_links(a, expand)
                                  for a in rpc_lldp_agents]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection


LOCK_NAME = 'LLDPAgentController'


class LLDPAgentController(rest.RestController):
    """REST controller for LldpAgents."""

    tlvs = lldp_tlv.LLDPTLVController(
        from_lldp_agents=True)
    "Expose tlvs as a sub-element of LldpAgents"

    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self, from_hosts=False, from_ports=False):
        self._from_hosts = from_hosts
        self._from_ports = from_ports

    def _get_lldp_agents_collection(self, uuid,
                                    marker, limit, sort_key, sort_dir,
                                    expand=False, resource_url=None):

        if self._from_hosts and not uuid:
            raise exception.InvalidParameterValue(_("Host id not specified."))

        if self._from_ports and not uuid:
            raise exception.InvalidParameterValue(_("Port id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.LLDPAgent.get_by_uuid(pecan.request.context,
                                                       marker)

        if self._from_hosts:
            agents = objects.LLDPAgent.get_by_host(
                pecan.request.context,
                uuid, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)

        elif self._from_ports:
            agents = []
            agent = objects.LLDPAgent.get_by_port(pecan.request.context, uuid)
            agents.append(agent)
        else:
            agents = objects.LLDPAgent.list(
                pecan.request.context,
                limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)

        return LLDPAgentCollection.convert_with_links(agents, limit,
                                                      url=resource_url,
                                                      expand=expand,
                                                      sort_key=sort_key,
                                                      sort_dir=sort_dir)

    @wsme_pecan.wsexpose(LLDPAgentCollection, types.uuid,
                         types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, uuid=None,
                marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of lldp agents."""
        return self._get_lldp_agents_collection(uuid, marker, limit, sort_key,
                                                sort_dir)

    @wsme_pecan.wsexpose(LLDPAgentCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of lldp_agents with detail."""

        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "lldp_agents":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['lldp_agents', 'detail'])
        return self._get_lldp_agents_collection(uuid, marker, limit, sort_key,
                                                sort_dir, expand, resource_url)

    @wsme_pecan.wsexpose(LLDPAgent, types.uuid)
    def get_one(self, port_uuid):
        """Retrieve information about the given lldp agent."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        rpc_lldp_agent = objects.LLDPAgent.get_by_uuid(
            pecan.request.context, port_uuid)
        return LLDPAgent.convert_with_links(rpc_lldp_agent)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(LLDPAgent, body=LLDPAgent)
    def post(self, agent):
        """Create a new lldp agent."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        try:
            host_uuid = agent.host_uuid
            port_uuid = agent.port_uuid
            new_agent = objects.LLDPAgent.create(
                pecan.request.context,
                port_uuid,
                host_uuid,
                agent.as_dict())
        except exception.InventoryException as e:
            LOG.exception(e)
            raise wsme.exc.ClientSideError(_("Invalid data"))
        return agent.convert_with_links(new_agent)

    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [LLDPAgentPatchType])
    @wsme_pecan.wsexpose(LLDPAgent, types.uuid,
                         body=[LLDPAgentPatchType])
    def patch(self, uuid, patch):
        """Update an existing lldp agent."""
        if self._from_hosts:
            raise exception.OperationNotPermitted
        if self._from_ports:
            raise exception.OperationNotPermitted

        rpc_agent = objects.LLDPAgent.get_by_uuid(
            pecan.request.context, uuid)

        # replace host_uuid and port_uuid with the corresponding ids
        patch_obj = jsonpatch.JsonPatch(patch)
        for p in patch_obj:
            if p['path'] == '/host_uuid':
                p['path'] = '/host_id'
                host = objects.Host.get_by_uuid(pecan.request.context,
                                                p['value'])
                p['value'] = host.id

            if p['path'] == '/port_uuid':
                p['path'] = '/port_id'
                try:
                    port = objects.Port.get_by_uuid(
                        pecan.request.context, p['value'])
                    p['value'] = port.id
                except exception.InventoryException as e:
                    LOG.exception(e)
                    p['value'] = None

        try:
            agent = LLDPAgent(**jsonpatch.apply_patch(rpc_agent.as_dict(),
                                                      patch_obj))

        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        for field in objects.LLDPAgent.fields:
            if rpc_agent[field] != getattr(agent, field):
                rpc_agent[field] = getattr(agent, field)

        rpc_agent.save()
        return LLDPAgent.convert_with_links(rpc_agent)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, uuid):
        """Delete an lldp agent."""
        if self._from_hosts:
            raise exception.OperationNotPermitted
        if self._from_ports:
            raise exception.OperationNotPermitted

        pecan.request.dbapi.lldp_agent_destroy(uuid)
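For illustration (endpoint, token, uuid and status value are all placeholders): an RFC 6902 patch against the lldp_agents resource; as shown in patch() above, '/host_uuid' and '/port_uuid' paths are rewritten to the internal integer ids before the patch is applied.

import json
import requests

patch = [{'op': 'replace', 'path': '/status', 'value': '<new-status>'}]
resp = requests.patch(
    'http://localhost:18002/v1/lldp_agents/<agent-uuid>',   # placeholders
    headers={'X-Auth-Token': '<keystone-token>',
             'Content-Type': 'application/json'},
    data=json.dumps(patch))
print(resp.status_code)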
390
inventory/inventory/inventory/api/controllers/v1/lldp_neighbour.py
Normal file
@ -0,0 +1,390 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2013 UnitedStack Inc.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
#
|
||||||
|
# Copyright (c) 2016 Wind River Systems, Inc.
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
import jsonpatch
|
||||||
|
|
||||||
|
import pecan
|
||||||
|
from pecan import rest
|
||||||
|
|
||||||
|
import wsme
|
||||||
|
from wsme import types as wtypes
|
||||||
|
import wsmeext.pecan as wsme_pecan
|
||||||
|
|
||||||
|
from inventory.api.controllers.v1 import base
|
||||||
|
from inventory.api.controllers.v1 import collection
|
||||||
|
from inventory.api.controllers.v1 import link
|
||||||
|
from inventory.api.controllers.v1 import lldp_tlv
|
||||||
|
from inventory.api.controllers.v1 import types
|
||||||
|
from inventory.api.controllers.v1 import utils
|
||||||
|
from inventory.common import exception
|
||||||
|
from inventory.common.i18n import _
|
||||||
|
from inventory.common import k_lldp
|
||||||
|
from inventory.common import utils as cutils
|
||||||
|
from inventory import objects
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class LLDPNeighbourPatchType(types.JsonPatchType):
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def mandatory_attrs():
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
class LLDPNeighbour(base.APIBase):
|
||||||
|
"""API representation of an LLDP Neighbour
|
||||||
|
|
||||||
|
This class enforces type checking and value constraints, and converts
|
||||||
|
between the internal object model and the API representation of an
|
||||||
|
LLDP neighbour.
|
||||||
|
"""
|
||||||
|
|
||||||
|
uuid = types.uuid
|
||||||
|
"Unique UUID for this port"
|
||||||
|
|
||||||
|
msap = wtypes.text
|
||||||
|
"Represent the MAC service access point of the lldp neighbour"
|
||||||
|
|
||||||
|
host_id = int
|
||||||
|
"Represent the host_id the lldp neighbour belongs to"
|
||||||
|
|
||||||
|
port_id = int
|
||||||
|
"Represent the port_id the lldp neighbour belongs to"
|
||||||
|
|
||||||
|
host_uuid = types.uuid
|
||||||
|
"Represent the UUID of the host the lldp neighbour belongs to"
|
||||||
|
|
||||||
|
port_uuid = types.uuid
|
||||||
|
"Represent the UUID of the port the lldp neighbour belongs to"
|
||||||
|
|
||||||
|
port_name = wtypes.text
|
||||||
|
"Represent the name of the port the lldp neighbour belongs to"
|
||||||
|
|
||||||
|
port_namedisplay = wtypes.text
|
||||||
|
"Represent the display name of the port. Unique per host"
|
||||||
|
|
||||||
|
links = [link.Link]
|
||||||
|
"Represent a list containing a self link and associated lldp neighbour"
|
||||||
|
"links"
|
||||||
|
|
||||||
|
tlvs = [link.Link]
|
||||||
|
"Links to the collection of LldpNeighbours on this ihost"
|
||||||
|
|
||||||
|
chassis_id = wtypes.text
|
||||||
|
"Represent the status of the lldp neighbour"
|
||||||
|
|
||||||
|
system_description = wtypes.text
|
||||||
|
"Represent the status of the lldp neighbour"
|
||||||
|
|
||||||
|
system_name = wtypes.text
|
||||||
|
"Represent the status of the lldp neighbour"
|
||||||
|
|
||||||
|
system_capabilities = wtypes.text
|
||||||
|
"Represent the status of the lldp neighbour"
|
||||||
|
|
||||||
|
management_address = wtypes.text
|
||||||
|
"Represent the status of the lldp neighbour"
|
||||||
|
|
||||||
|
port_identifier = wtypes.text
|
||||||
|
"Represent the port identifier of the lldp neighbour"
|
||||||
|
|
||||||
|
port_description = wtypes.text
|
||||||
|
"Represent the port description of the lldp neighbour"
|
||||||
|
|
||||||
|
dot1_lag = wtypes.text
|
||||||
|
"Represent the 802.1 link aggregation status of the lldp neighbour"
|
||||||
|
|
||||||
|
dot1_port_vid = wtypes.text
|
||||||
|
"Represent the 802.1 port vlan id of the lldp neighbour"
|
||||||
|
|
||||||
|
dot1_vid_digest = wtypes.text
|
||||||
|
"Represent the 802.1 vlan id digest of the lldp neighbour"
|
||||||
|
|
||||||
|
dot1_management_vid = wtypes.text
|
||||||
|
"Represent the 802.1 management vlan id of the lldp neighbour"
|
||||||
|
|
||||||
|
dot1_vlan_names = wtypes.text
|
||||||
|
"Represent the 802.1 vlan names of the lldp neighbour"
|
||||||
|
|
||||||
|
dot1_proto_vids = wtypes.text
|
||||||
|
"Represent the 802.1 protocol vlan ids of the lldp neighbour"
|
||||||
|
|
||||||
|
dot1_proto_ids = wtypes.text
|
||||||
|
"Represent the 802.1 protocol ids of the lldp neighbour"
|
||||||
|
|
||||||
|
dot3_mac_status = wtypes.text
|
||||||
|
"Represent the 802.3 MAC/PHY status of the lldp neighbour"
|
||||||
|
|
||||||
|
dot3_max_frame = wtypes.text
|
||||||
|
"Represent the 802.3 maximum frame size of the lldp neighbour"
|
||||||
|
|
||||||
|
dot3_power_mdi = wtypes.text
|
||||||
|
"Represent the 802.3 power mdi status of the lldp neighbour"
|
||||||
|
|
||||||
|
ttl = wtypes.text
|
||||||
|
"Represent the neighbour time-to-live"
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
self.fields = objects.LLDPNeighbour.fields.keys()
|
||||||
|
for k in self.fields:
|
||||||
|
setattr(self, k, kwargs.get(k))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def convert_with_links(cls, rpc_lldp_neighbour, expand=True):
|
||||||
|
lldp_neighbour = LLDPNeighbour(**rpc_lldp_neighbour.as_dict())
|
||||||
|
|
||||||
|
if not expand:
|
||||||
|
lldp_neighbour.unset_fields_except([
|
||||||
|
'uuid', 'host_id', 'port_id', 'msap', 'host_uuid', 'port_uuid',
|
||||||
|
'port_name', 'port_namedisplay', 'created_at', 'updated_at',
|
||||||
|
k_lldp.LLDP_TLV_TYPE_CHASSIS_ID,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_PORT_ID,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_TTL,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_MGMT_ADDR,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_PORT_DESC,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT1_LAG,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME,
|
||||||
|
k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI])
|
||||||
|
|
||||||
|
# never expose the id attribute
|
||||||
|
lldp_neighbour.host_id = wtypes.Unset
|
||||||
|
lldp_neighbour.port_id = wtypes.Unset
|
||||||
|
|
||||||
|
lldp_neighbour.links = [
|
||||||
|
link.Link.make_link('self', pecan.request.host_url,
|
||||||
|
'lldp_neighbours', lldp_neighbour.uuid),
|
||||||
|
link.Link.make_link('bookmark',
|
||||||
|
pecan.request.host_url,
|
||||||
|
'lldp_neighbours', lldp_neighbour.uuid,
|
||||||
|
bookmark=True)]
|
||||||
|
|
||||||
|
if expand:
|
||||||
|
lldp_neighbour.tlvs = [
|
||||||
|
link.Link.make_link('self',
|
||||||
|
pecan.request.host_url,
|
||||||
|
'lldp_neighbours',
|
||||||
|
lldp_neighbour.uuid + "/tlvs"),
|
||||||
|
link.Link.make_link('bookmark',
|
||||||
|
pecan.request.host_url,
|
||||||
|
'lldp_neighbours',
|
||||||
|
lldp_neighbour.uuid + "/tlvs",
|
||||||
|
bookmark=True)]
|
||||||
|
|
||||||
|
return lldp_neighbour
|
||||||
|
|
||||||
|
|
||||||
|
class LLDPNeighbourCollection(collection.Collection):
|
||||||
|
"""API representation of a collection of LldpNeighbour objects."""
|
||||||
|
|
||||||
|
lldp_neighbours = [LLDPNeighbour]
|
||||||
|
"A list containing LldpNeighbour objects"
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
self._type = 'lldp_neighbours'
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def convert_with_links(cls, rpc_lldp_neighbours, limit, url=None,
|
||||||
|
expand=False, **kwargs):
|
||||||
|
collection = LLDPNeighbourCollection()
|
||||||
|
|
||||||
|
collection.lldp_neighbours = [LLDPNeighbour.convert_with_links(a,
|
||||||
|
expand)
|
||||||
|
for a in rpc_lldp_neighbours]
|
||||||
|
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||||
|
return collection
|
||||||
|
|
||||||
|
|
||||||
|
LOCK_NAME = 'LLDPNeighbourController'
|
||||||
|
|
||||||
|
|
||||||
|
class LLDPNeighbourController(rest.RestController):
|
||||||
|
"""REST controller for LldpNeighbours."""
|
||||||
|
|
||||||
|
tlvs = lldp_tlv.LLDPTLVController(
|
||||||
|
from_lldp_neighbours=True)
|
||||||
|
"Expose tlvs as a sub-element of LldpNeighbours"
|
||||||
|
|
||||||
|
_custom_actions = {
|
||||||
|
'detail': ['GET'],
|
||||||
|
}
|
||||||
|
|
||||||
|
def __init__(self, from_hosts=False, from_ports=False):
|
||||||
|
self._from_hosts = from_hosts
|
||||||
|
self._from_ports = from_ports
|
||||||
|
|
||||||
|
def _get_lldp_neighbours_collection(self, uuid, marker, limit, sort_key,
|
||||||
|
sort_dir, expand=False,
|
||||||
|
resource_url=None):
|
||||||
|
|
||||||
|
if self._from_hosts and not uuid:
|
||||||
|
raise exception.InvalidParameterValue(_("Host id not specified."))
|
||||||
|
|
||||||
|
if self._from_ports and not uuid:
|
||||||
|
raise exception.InvalidParameterValue(_("Port id not specified."))
|
||||||
|
|
||||||
|
limit = utils.validate_limit(limit)
|
||||||
|
sort_dir = utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
|
marker_obj = None
|
||||||
|
if marker:
|
||||||
|
marker_obj = objects.LLDPNeighbour.get_by_uuid(
|
||||||
|
pecan.request.context, marker)
|
||||||
|
|
||||||
|
if self._from_hosts:
|
||||||
|
neighbours = pecan.request.dbapi.lldp_neighbour_get_by_host(
|
||||||
|
uuid, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)
|
||||||
|
|
||||||
|
elif self._from_ports:
|
||||||
|
neighbours = pecan.request.dbapi.lldp_neighbour_get_by_port(
|
||||||
|
uuid, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)
|
||||||
|
else:
|
||||||
|
neighbours = pecan.request.dbapi.lldp_neighbour_get_list(
|
||||||
|
limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)
|
||||||
|
|
||||||
|
return LLDPNeighbourCollection.convert_with_links(neighbours, limit,
|
||||||
|
url=resource_url,
|
||||||
|
expand=expand,
|
||||||
|
sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
|
|
||||||
|
@wsme_pecan.wsexpose(LLDPNeighbourCollection, types.uuid,
|
||||||
|
types.uuid, int, wtypes.text, wtypes.text)
|
||||||
|
def get_all(self, uuid=None,
|
||||||
|
marker=None, limit=None, sort_key='id', sort_dir='asc'):
|
||||||
|
"""Retrieve a list of lldp neighbours."""
|
||||||
|
|
||||||
|
return self._get_lldp_neighbours_collection(uuid, marker, limit,
|
||||||
|
sort_key, sort_dir)
|
||||||
|
|
||||||
|
@wsme_pecan.wsexpose(LLDPNeighbourCollection, types.uuid, types.uuid, int,
|
||||||
|
wtypes.text, wtypes.text)
|
||||||
|
def detail(self, uuid=None, marker=None, limit=None,
|
||||||
|
sort_key='id', sort_dir='asc'):
|
||||||
|
"""Retrieve a list of lldp_neighbours with detail."""
|
||||||
|
|
||||||
|
parent = pecan.request.path.split('/')[:-1][-1]
|
||||||
|
if parent != "lldp_neighbours":
|
||||||
|
raise exception.HTTPNotFound
|
||||||
|
|
||||||
|
expand = True
|
||||||
|
resource_url = '/'.join(['lldp_neighbours', 'detail'])
|
||||||
|
return self._get_lldp_neighbours_collection(uuid, marker, limit,
|
||||||
|
sort_key, sort_dir, expand,
|
||||||
|
resource_url)
|
||||||
|
|
||||||
|
@wsme_pecan.wsexpose(LLDPNeighbour, types.uuid)
|
||||||
|
def get_one(self, port_uuid):
|
||||||
|
"""Retrieve information about the given lldp neighbour."""
|
||||||
|
if self._from_hosts:
|
||||||
|
raise exception.OperationNotPermitted
|
||||||
|
|
||||||
|
rpc_lldp_neighbour = objects.LLDPNeighbour.get_by_uuid(
|
||||||
|
pecan.request.context, port_uuid)
|
||||||
|
return LLDPNeighbour.convert_with_links(rpc_lldp_neighbour)
|
||||||
|
|
||||||
|
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(LLDPNeighbour, body=LLDPNeighbour)
    def post(self, neighbour):
        """Create a new lldp neighbour."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        try:
            host_uuid = neighbour.host_uuid
            port_uuid = neighbour.port_uuid
            new_neighbour = pecan.request.dbapi.lldp_neighbour_create(
                port_uuid, host_uuid, neighbour.as_dict())
        except exception.InventoryException as e:
            LOG.exception(e)
            raise wsme.exc.ClientSideError(_("Invalid data"))
        return neighbour.convert_with_links(new_neighbour)
    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [LLDPNeighbourPatchType])
    @wsme_pecan.wsexpose(LLDPNeighbour, types.uuid,
                         body=[LLDPNeighbourPatchType])
    def patch(self, uuid, patch):
        """Update an existing lldp neighbour."""
        if self._from_hosts:
            raise exception.OperationNotPermitted
        if self._from_ports:
            raise exception.OperationNotPermitted

        rpc_neighbour = objects.LLDPNeighbour.get_by_uuid(
            pecan.request.context, uuid)

        # replace host_uuid and port_uuid with their corresponding
        # internal ids
        patch_obj = jsonpatch.JsonPatch(patch)
        for p in patch_obj:
            if p['path'] == '/host_uuid':
                p['path'] = '/host_id'
                host = objects.Host.get_by_uuid(pecan.request.context,
                                                p['value'])
                p['value'] = host.id

            if p['path'] == '/port_uuid':
                p['path'] = '/port_id'
                try:
                    port = objects.Port.get_by_uuid(
                        pecan.request.context, p['value'])
                    p['value'] = port.id
                except exception.InventoryException as e:
                    LOG.exception(e)
                    p['value'] = None

        try:
            neighbour = LLDPNeighbour(
                **jsonpatch.apply_patch(rpc_neighbour.as_dict(), patch_obj))
        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        for field in objects.LLDPNeighbour.fields:
            if rpc_neighbour[field] != getattr(neighbour, field):
                rpc_neighbour[field] = getattr(neighbour, field)

        rpc_neighbour.save()
        return LLDPNeighbour.convert_with_links(rpc_neighbour)
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, uuid):
        """Delete an lldp neighbour."""
        if self._from_hosts:
            raise exception.OperationNotPermitted
        if self._from_ports:
            raise exception.OperationNotPermitted

        pecan.request.dbapi.lldp_neighbour_destroy(uuid)
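The patch() handler above rewrites the API-visible '/host_uuid' and '/port_uuid' paths into the internal integer-id columns before applying the JSON patch. A standalone sketch of that rewrite (the uuid_to_id dict and the sample document are illustrative stand-ins for the objects.Host/objects.Port database lookups):

import jsonpatch

uuid_to_id = {'6c2f-...': 7}  # stand-in for a database lookup

def rewrite(patch):
    # Mutate each op in place, exactly as the controller does.
    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/host_uuid':
            p['path'] = '/host_id'
            p['value'] = uuid_to_id[p['value']]
    return patch_obj

doc = {'host_id': 3, 'msap': 'neighbour-1'}
patch = [{'op': 'replace', 'path': '/host_uuid', 'value': '6c2f-...'}]
print(jsonpatch.apply_patch(doc, rewrite(patch)))
# {'host_id': 7, 'msap': 'neighbour-1'}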
297 inventory/inventory/inventory/api/controllers/v1/lldp_tlv.py Normal file
@@ -0,0 +1,297 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2016-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import jsonpatch

import pecan
from pecan import rest

import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import utils as cutils
from inventory import objects

from oslo_log import log

LOG = log.getLogger(__name__)

class LLDPTLVPatchType(types.JsonPatchType):

    @staticmethod
    def mandatory_attrs():
        return []


class LLDPTLV(base.APIBase):
    """API representation of an LldpTlv

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of an
    LLDP tlv.
    """

    type = wtypes.text
    "Represent the type of the lldp tlv"

    value = wtypes.text
    "Represent the value of the lldp tlv"

    agent_id = int
    "Represent the agent_id the lldp tlv belongs to"

    neighbour_id = int
    "Represent the neighbour the lldp tlv belongs to"

    agent_uuid = types.uuid
    "Represent the UUID of the agent the lldp tlv belongs to"

    neighbour_uuid = types.uuid
    "Represent the UUID of the neighbour the lldp tlv belongs to"

    links = [link.Link]
    "Represent a list containing a self link and associated lldp tlv links"

    def __init__(self, **kwargs):
        self.fields = objects.LLDPTLV.fields.keys()
        for k in self.fields:
            setattr(self, k, kwargs.get(k))

    @classmethod
    def convert_with_links(cls, rpc_lldp_tlv, expand=True):
        lldp_tlv = LLDPTLV(**rpc_lldp_tlv.as_dict())
        if not expand:
            lldp_tlv.unset_fields_except(['type', 'value'])

        # never expose the id attribute
        lldp_tlv.agent_id = wtypes.Unset
        lldp_tlv.neighbour_id = wtypes.Unset

        lldp_tlv.links = [link.Link.make_link('self', pecan.request.host_url,
                                              'lldp_tlvs', lldp_tlv.type),
                          link.Link.make_link('bookmark',
                                              pecan.request.host_url,
                                              'lldp_tlvs', lldp_tlv.type,
                                              bookmark=True)]
        return lldp_tlv

class LLDPTLVCollection(collection.Collection):
    """API representation of a collection of LldpTlv objects."""

    lldp_tlvs = [LLDPTLV]
    "A list containing LldpTlv objects"

    def __init__(self, **kwargs):
        self._type = 'lldp_tlvs'

    @classmethod
    def convert_with_links(cls, rpc_lldp_tlvs, limit, url=None,
                           expand=False, **kwargs):
        collection = LLDPTLVCollection()
        collection.lldp_tlvs = [LLDPTLV.convert_with_links(a, expand)
                                for a in rpc_lldp_tlvs]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection


LOCK_NAME = 'LLDPTLVController'


class LLDPTLVController(rest.RestController):
    """REST controller for LldpTlvs."""

    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self, from_lldp_agents=False, from_lldp_neighbours=False):
        self._from_lldp_agents = from_lldp_agents
        self._from_lldp_neighbours = from_lldp_neighbours
    def _get_lldp_tlvs_collection(self, uuid,
                                  marker, limit, sort_key, sort_dir,
                                  expand=False, resource_url=None):

        if self._from_lldp_agents and not uuid:
            raise exception.InvalidParameterValue(
                _("LLDP agent id not specified."))

        if self._from_lldp_neighbours and not uuid:
            raise exception.InvalidParameterValue(
                _("LLDP neighbour id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.LLDPTLV.get_by_id(pecan.request.context,
                                                   marker)

        if self._from_lldp_agents:
            tlvs = objects.LLDPTLV.get_by_agent(pecan.request.context,
                                                uuid,
                                                limit,
                                                marker_obj,
                                                sort_key=sort_key,
                                                sort_dir=sort_dir)
        elif self._from_lldp_neighbours:
            tlvs = objects.LLDPTLV.get_by_neighbour(
                pecan.request.context,
                uuid, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)
        else:
            tlvs = objects.LLDPTLV.list(
                pecan.request.context,
                limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)

        return LLDPTLVCollection.convert_with_links(tlvs,
                                                    limit,
                                                    url=resource_url,
                                                    expand=expand,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)
    @wsme_pecan.wsexpose(LLDPTLVCollection, types.uuid,
                         types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, uuid=None,
                marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of lldp tlvs."""
        return self._get_lldp_tlvs_collection(uuid, marker, limit, sort_key,
                                              sort_dir)

    @wsme_pecan.wsexpose(LLDPTLVCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of lldp_tlvs with detail."""
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "lldp_tlvs":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['lldp_tlvs', 'detail'])
        return self._get_lldp_tlvs_collection(uuid, marker, limit, sort_key,
                                              sort_dir, expand, resource_url)
    @wsme_pecan.wsexpose(LLDPTLV, int)
    def get_one(self, id):
        """Retrieve information about the given lldp tlv."""
        if self._from_lldp_agents or self._from_lldp_neighbours:
            raise exception.OperationNotPermitted

        rpc_lldp_tlv = objects.LLDPTLV.get_by_id(
            pecan.request.context, id)
        return LLDPTLV.convert_with_links(rpc_lldp_tlv)
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(LLDPTLV, body=LLDPTLV)
    def post(self, tlv):
        """Create a new lldp tlv."""
        if self._from_lldp_agents:
            raise exception.OperationNotPermitted

        if self._from_lldp_neighbours:
            raise exception.OperationNotPermitted

        try:
            agent_uuid = tlv.agent_uuid
            neighbour_uuid = tlv.neighbour_uuid
            new_tlv = pecan.request.dbapi.lldp_tlv_create(tlv.as_dict(),
                                                          agent_uuid,
                                                          neighbour_uuid)
        except exception.InventoryException as e:
            LOG.exception(e)
            raise wsme.exc.ClientSideError(_("Invalid data"))
        return tlv.convert_with_links(new_tlv)
    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [LLDPTLVPatchType])
    @wsme_pecan.wsexpose(LLDPTLV, int,
                         body=[LLDPTLVPatchType])
    def patch(self, id, patch):
        """Update an existing lldp tlv."""
        if self._from_lldp_agents:
            raise exception.OperationNotPermitted
        if self._from_lldp_neighbours:
            raise exception.OperationNotPermitted

        rpc_tlv = objects.LLDPTLV.get_by_id(
            pecan.request.context, id)

        # replace agent_uuid and neighbour_uuid with their corresponding
        # internal ids
        patch_obj = jsonpatch.JsonPatch(patch)
        for p in patch_obj:
            if p['path'] == '/agent_uuid':
                p['path'] = '/agent_id'
                agent = objects.LLDPAgent.get_by_uuid(pecan.request.context,
                                                      p['value'])
                p['value'] = agent.id

            if p['path'] == '/neighbour_uuid':
                p['path'] = '/neighbour_id'
                try:
                    neighbour = objects.LLDPNeighbour.get_by_uuid(
                        pecan.request.context, p['value'])
                    p['value'] = neighbour.id
                except exception.InventoryException as e:
                    LOG.exception(e)
                    p['value'] = None

        try:
            tlv = LLDPTLV(
                **jsonpatch.apply_patch(rpc_tlv.as_dict(), patch_obj))
        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        for field in objects.LLDPTLV.fields:
            if rpc_tlv[field] != getattr(tlv, field):
                rpc_tlv[field] = getattr(tlv, field)

        rpc_tlv.save()
        return LLDPTLV.convert_with_links(rpc_tlv)
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, int, status_code=204)
    def delete(self, id):
        """Delete an lldp tlv."""
        if self._from_lldp_agents:
            raise exception.OperationNotPermitted
        if self._from_lldp_neighbours:
            raise exception.OperationNotPermitted

        tlv = objects.LLDPTLV.get_by_id(pecan.request.context, id)
        tlv.destroy()
        # pecan.request.dbapi.lldp_tlv_destroy(id)
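With the routes above in place, a client can page through TLVs over plain HTTP and fetch the expanded /detail view. A sketch of such a session (the endpoint URL, port and token are assumptions; the real values depend on service configuration delivered separately):

import requests

INVENTORY_URL = 'http://controller:6380/v1'      # assumed endpoint
HEADERS = {'X-Auth-Token': '<keystone-token>'}   # assumed auth

# The plain listing returns the compact representation ('type', 'value').
tlvs = requests.get(INVENTORY_URL + '/lldp_tlvs',
                    params={'limit': 50, 'sort_key': 'id'},
                    headers=HEADERS).json()

# /detail returns fully expanded objects (all fields kept).
detail = requests.get(INVENTORY_URL + '/lldp_tlvs/detail',
                      headers=HEADERS).json()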
729 inventory/inventory/inventory/api/controllers/v1/memory.py Normal file
@@ -0,0 +1,729 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import jsonpatch
import six

import pecan
from pecan import rest

import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import utils as cutils
from inventory import objects
from oslo_log import log


LOG = log.getLogger(__name__)

class MemoryPatchType(types.JsonPatchType):

    @staticmethod
    def mandatory_attrs():
        return []


class Memory(base.APIBase):
    """API representation of host memory.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a memory.
    """

    _minimum_platform_reserved_mib = None

    def _get_minimum_platform_reserved_mib(self):
        return self._minimum_platform_reserved_mib

    def _set_minimum_platform_reserved_mib(self, value):
        if self._minimum_platform_reserved_mib is None:
            try:
                ihost = objects.Host.get_by_uuid(pecan.request.context, value)
                self._minimum_platform_reserved_mib = \
                    cutils.get_minimum_platform_reserved_memory(ihost,
                                                                self.numa_node)
            except exception.HostNotFound as e:
                # Change error code because 404 (NotFound) is an inappropriate
                # response for a POST request to create
                e.code = 400  # BadRequest
                raise e
        elif value == wtypes.Unset:
            self._minimum_platform_reserved_mib = wtypes.Unset
    uuid = types.uuid
    "Unique UUID for this memory"

    memtotal_mib = int
    "Represent the imemory total in MiB"

    memavail_mib = int
    "Represent the imemory available in MiB"

    platform_reserved_mib = int
    "Represent the imemory platform reserved in MiB"

    hugepages_configured = wtypes.text
    "Represent whether huge pages are configured"

    vswitch_hugepages_size_mib = int
    "Represent the imemory vswitch huge pages size in MiB"

    vswitch_hugepages_reqd = int
    "Represent the imemory vswitch required number of hugepages"

    vswitch_hugepages_nr = int
    "Represent the imemory vswitch number of hugepages"

    vswitch_hugepages_avail = int
    "Represent the imemory vswitch number of hugepages available"

    vm_hugepages_nr_2M_pending = int
    "Represent the imemory vm number of hugepages pending (2M pages)"

    vm_hugepages_nr_2M = int
    "Represent the imemory vm number of hugepages (2M pages)"

    vm_hugepages_avail_2M = int
    "Represent the imemory vm number of hugepages available (2M pages)"

    vm_hugepages_nr_1G_pending = int
    "Represent the imemory vm number of hugepages pending (1G pages)"

    vm_hugepages_nr_1G = int
    "Represent the imemory vm number of hugepages (1G pages)"

    vm_hugepages_nr_4K = int
    "Represent the imemory vm number of hugepages (4K pages)"

    vm_hugepages_use_1G = wtypes.text
    "Whether 1G huge pages are supported: 'True' or 'False'"

    vm_hugepages_avail_1G = int
    "Represent the imemory vm number of hugepages available (1G pages)"

    vm_hugepages_possible_2M = int
    "Represent the total possible number of vm hugepages available (2M pages)"

    vm_hugepages_possible_1G = int
    "Represent the total possible number of vm hugepages available (1G pages)"

    minimum_platform_reserved_mib = wsme.wsproperty(
        int,
        _get_minimum_platform_reserved_mib,
        _set_minimum_platform_reserved_mib,
        mandatory=True)
    "Represent the default platform reserved memory in MiB. API only attribute"

    numa_node = int
    "The numa node or zone of the imemory. API only attribute"

    capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
                                                  six.integer_types)}
    "This memory's meta data"

    host_id = int
    "The ihostid that this imemory belongs to"

    node_id = int
    "The nodeId that this imemory belongs to"

    ihost_uuid = types.uuid
    "The UUID of the ihost this memory belongs to"

    node_uuid = types.uuid
    "The UUID of the node this memory belongs to"

    links = [link.Link]
    "A list containing a self link and associated memory links"
    def __init__(self, **kwargs):
        self.fields = objects.Memory.fields.keys()
        for k in self.fields:
            setattr(self, k, kwargs.get(k))

        # API only attributes
        self.fields.append('minimum_platform_reserved_mib')
        setattr(self, 'minimum_platform_reserved_mib',
                kwargs.get('host_id', None))

    @classmethod
    def convert_with_links(cls, rpc_mem, expand=True):
        # fields = ['uuid', 'address'] if not expand else None
        # memory = imemory.from_rpc_object(rpc_mem, fields)

        memory = Memory(**rpc_mem.as_dict())
        if not expand:
            memory.unset_fields_except(
                ['uuid', 'memtotal_mib', 'memavail_mib',
                 'platform_reserved_mib', 'hugepages_configured',
                 'vswitch_hugepages_size_mib', 'vswitch_hugepages_nr',
                 'vswitch_hugepages_reqd',
                 'vswitch_hugepages_avail',
                 'vm_hugepages_nr_2M',
                 'vm_hugepages_nr_1G', 'vm_hugepages_use_1G',
                 'vm_hugepages_nr_2M_pending',
                 'vm_hugepages_avail_2M',
                 'vm_hugepages_nr_1G_pending',
                 'vm_hugepages_avail_1G',
                 'vm_hugepages_nr_4K',
                 'vm_hugepages_possible_2M', 'vm_hugepages_possible_1G',
                 'numa_node', 'ihost_uuid', 'node_uuid',
                 'host_id', 'node_id',
                 'capabilities',
                 'created_at', 'updated_at',
                 'minimum_platform_reserved_mib'])

        # never expose the id attribute
        memory.host_id = wtypes.Unset
        memory.node_id = wtypes.Unset

        memory.links = [link.Link.make_link('self', pecan.request.host_url,
                                            'memorys', memory.uuid),
                        link.Link.make_link('bookmark',
                                            pecan.request.host_url,
                                            'memorys', memory.uuid,
                                            bookmark=True)
                        ]
        return memory

class MemoryCollection(collection.Collection):
    """API representation of a collection of memorys."""

    memorys = [Memory]
    "A list containing memory objects"

    def __init__(self, **kwargs):
        self._type = 'memorys'

    @classmethod
    def convert_with_links(cls, memorys, limit, url=None,
                           expand=False, **kwargs):
        collection = MemoryCollection()
        collection.memorys = [
            Memory.convert_with_links(n, expand) for n in memorys]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection


LOCK_NAME = 'MemoryController'


class MemoryController(rest.RestController):
    """REST controller for memorys."""

    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self, from_hosts=False, from_node=False):
        self._from_hosts = from_hosts
        self._from_node = from_node
    def _get_memorys_collection(self, i_uuid, node_uuid,
                                marker, limit, sort_key, sort_dir,
                                expand=False, resource_url=None):

        if self._from_hosts and not i_uuid:
            raise exception.InvalidParameterValue(_(
                "Host id not specified."))

        if self._from_node and not i_uuid:
            raise exception.InvalidParameterValue(_(
                "Node id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.Memory.get_by_uuid(pecan.request.context,
                                                    marker)

        if self._from_hosts:
            # memorys = pecan.request.dbapi.imemory_get_by_ihost(
            memorys = objects.Memory.get_by_host(
                pecan.request.context,
                i_uuid, limit,
                marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)

        elif self._from_node:
            # memorys = pecan.request.dbapi.imemory_get_by_node(
            memorys = objects.Memory.get_by_node(
                pecan.request.context,
                i_uuid, limit,
                marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
        else:
            if i_uuid and not node_uuid:
                # memorys = pecan.request.dbapi.imemory_get_by_ihost(
                memorys = objects.Memory.get_by_host(
                    pecan.request.context,
                    i_uuid, limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)
            elif i_uuid and node_uuid:  # Need ihost_uuid ?
                # memorys = pecan.request.dbapi.imemory_get_by_ihost_node(
                memorys = objects.Memory.get_by_host_node(
                    pecan.request.context,
                    i_uuid,
                    node_uuid,
                    limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)
            elif node_uuid:
                # memorys = pecan.request.dbapi.imemory_get_by_ihost_node(
                memorys = objects.Memory.get_by_node(
                    pecan.request.context,
                    node_uuid,
                    limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)
            else:
                # memorys = pecan.request.dbapi.imemory_get_list(
                memorys = objects.Memory.list(
                    pecan.request.context,
                    limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)

        return MemoryCollection.convert_with_links(memorys, limit,
                                                   url=resource_url,
                                                   expand=expand,
                                                   sort_key=sort_key,
                                                   sort_dir=sort_dir)
    @wsme_pecan.wsexpose(MemoryCollection, types.uuid, types.uuid,
                         types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, ihost_uuid=None, node_uuid=None,
                marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of memorys."""
        return self._get_memorys_collection(
            ihost_uuid, node_uuid, marker, limit, sort_key, sort_dir)
    @wsme_pecan.wsexpose(MemoryCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, ihost_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of memorys with detail."""
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "memorys":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['memorys', 'detail'])
        return self._get_memorys_collection(ihost_uuid, None, marker, limit,
                                            sort_key, sort_dir,
                                            expand, resource_url)
    @wsme_pecan.wsexpose(Memory, types.uuid)
    def get_one(self, memory_uuid):
        """Retrieve information about the given memory."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        rpc_mem = objects.Memory.get_by_uuid(pecan.request.context,
                                             memory_uuid)
        return Memory.convert_with_links(rpc_mem)
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(Memory, body=Memory)
    def post(self, memory):
        """Create a new memory."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        try:
            ihost_uuid = memory.ihost_uuid
            new_memory = pecan.request.dbapi.imemory_create(ihost_uuid,
                                                            memory.as_dict())
        except exception.InventoryException as e:
            LOG.exception(e)
            raise wsme.exc.ClientSideError(_("Invalid data"))
        return Memory.convert_with_links(new_memory)
    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [MemoryPatchType])
    @wsme_pecan.wsexpose(Memory, types.uuid,
                         body=[MemoryPatchType])
    def patch(self, memory_uuid, patch):
        """Update an existing memory."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        rpc_mem = objects.Memory.get_by_uuid(
            pecan.request.context, memory_uuid)

        if 'host_id' in rpc_mem:
            ihostId = rpc_mem['host_id']
        else:
            ihostId = rpc_mem['ihost_uuid']

        host_id = pecan.request.dbapi.ihost_get(ihostId)

        vm_hugepages_nr_2M_pending = None
        vm_hugepages_nr_1G_pending = None
        platform_reserved_mib = None
        for p in patch:
            if p['path'] == '/platform_reserved_mib':
                platform_reserved_mib = p['value']
            if p['path'] == '/vm_hugepages_nr_2M_pending':
                vm_hugepages_nr_2M_pending = p['value']

            if p['path'] == '/vm_hugepages_nr_1G_pending':
                vm_hugepages_nr_1G_pending = p['value']

        # The host must be locked
        if host_id:
            _check_host(host_id)
        else:
            raise wsme.exc.ClientSideError(_(
                "Hostname or uuid must be defined"))

        try:
            # Semantics checks and update hugepage memory accounting
            patch = _check_huge_values(
                rpc_mem, patch,
                vm_hugepages_nr_2M_pending, vm_hugepages_nr_1G_pending)
        except wsme.exc.ClientSideError as e:
            node = pecan.request.dbapi.node_get(node_id=rpc_mem.node_id)
            numa_node = node.numa_node
            msg = _('Processor {0}:').format(numa_node) + e.message
            raise wsme.exc.ClientSideError(msg)

        # Semantics checks for platform memory
        _check_memory(rpc_mem, host_id, platform_reserved_mib,
                      vm_hugepages_nr_2M_pending, vm_hugepages_nr_1G_pending)

        # replace ihost_uuid and node_uuid with their corresponding
        # internal ids
        patch_obj = jsonpatch.JsonPatch(patch)

        for p in patch_obj:
            if p['path'] == '/ihost_uuid':
                p['path'] = '/host_id'
                ihost = objects.Host.get_by_uuid(pecan.request.context,
                                                 p['value'])
                p['value'] = ihost.id

            if p['path'] == '/node_uuid':
                p['path'] = '/node_id'
                try:
                    node = objects.Node.get_by_uuid(
                        pecan.request.context, p['value'])
                    p['value'] = node.id
                except exception.InventoryException:
                    p['value'] = None

        try:
            memory = Memory(**jsonpatch.apply_patch(rpc_mem.as_dict(),
                                                    patch_obj))
        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        for field in objects.Memory.fields:
            if rpc_mem[field] != getattr(memory, field):
                rpc_mem[field] = getattr(memory, field)

        rpc_mem.save()
        return Memory.convert_with_links(rpc_mem)
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, memory_uuid):
        """Delete a memory."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        pecan.request.dbapi.imemory_destroy(memory_uuid)

##############
# UTILS
##############


def _update(mem_uuid, mem_values):

    rpc_mem = objects.Memory.get_by_uuid(pecan.request.context, mem_uuid)
    if 'host_id' in rpc_mem:
        ihostId = rpc_mem['host_id']
    else:
        ihostId = rpc_mem['ihost_uuid']

    host_id = pecan.request.dbapi.ihost_get(ihostId)

    platform_reserved_mib = None
    vm_hugepages_nr_2M_pending = None
    vm_hugepages_nr_1G_pending = None

    if 'platform_reserved_mib' in mem_values:
        platform_reserved_mib = mem_values['platform_reserved_mib']

    if 'vm_hugepages_nr_2M_pending' in mem_values:
        vm_hugepages_nr_2M_pending = mem_values['vm_hugepages_nr_2M_pending']

    if 'vm_hugepages_nr_1G_pending' in mem_values:
        vm_hugepages_nr_1G_pending = mem_values['vm_hugepages_nr_1G_pending']

    # The host must be locked
    if host_id:
        _check_host(host_id)
    else:
        raise wsme.exc.ClientSideError(_(
            "Hostname or uuid must be defined"))

    # Semantics checks and update hugepage memory accounting
    mem_values = _check_huge_values(
        rpc_mem, mem_values,
        vm_hugepages_nr_2M_pending, vm_hugepages_nr_1G_pending)

    # Semantics checks for platform memory
    _check_memory(rpc_mem, host_id, platform_reserved_mib,
                  vm_hugepages_nr_2M_pending, vm_hugepages_nr_1G_pending)

    # update memory values
    pecan.request.dbapi.imemory_update(mem_uuid, mem_values)

def _check_host(ihost):
    if utils.is_aio_simplex_host_unlocked(ihost):
        raise wsme.exc.ClientSideError(_("Host must be locked."))
    elif ihost['administrative'] != 'locked':
        unlocked = False
        current_ihosts = pecan.request.dbapi.ihost_get_list()
        for h in current_ihosts:
            if (h['administrative'] != 'locked' and
                    h['hostname'] != ihost['hostname']):
                unlocked = True
        if unlocked:
            raise wsme.exc.ClientSideError(_("Host must be locked."))
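_check_host refuses a memory update on an unlocked host whenever any other host is also unlocked, and always refuses on an unlocked AIO simplex host. The same rule reduced to a pure predicate (a sketch; the host dicts stand in for the dbapi records):

def update_refused(target, all_hosts, aio_simplex_unlocked=False):
    # True when the memory update must be rejected.
    if aio_simplex_unlocked:
        return True
    if target['administrative'] == 'locked':
        return False
    return any(h['administrative'] != 'locked' and
               h['hostname'] != target['hostname'] for h in all_hosts)

hosts = [{'hostname': 'controller-0', 'administrative': 'unlocked'},
         {'hostname': 'compute-0', 'administrative': 'locked'}]
print(update_refused(hosts[0], hosts))   # False: every other host is locked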

def _check_memory(rpc_mem, ihost,
                  platform_reserved_mib=None,
                  vm_hugepages_nr_2M_pending=None,
                  vm_hugepages_nr_1G_pending=None):
    if platform_reserved_mib:
        # Check for invalid characters
        try:
            val = int(platform_reserved_mib)
        except ValueError:
            raise wsme.exc.ClientSideError(_(
                "Platform memory must be a number"))
        if val < 0:
            raise wsme.exc.ClientSideError(_(
                "Platform memory must be greater than zero"))

        # Check for lower limit
        node_id = rpc_mem['node_id']
        node = pecan.request.dbapi.node_get(node_id)
        min_platform_memory = \
            cutils.get_minimum_platform_reserved_memory(ihost, node.numa_node)
        if int(platform_reserved_mib) < min_platform_memory:
            raise wsme.exc.ClientSideError(
                _("Platform reserved memory for numa node {} "
                  "must be greater than the minimum value {}").format(
                      node.numa_node, min_platform_memory))

        # Check if it is within 2/3 of the total memory
        node_memtotal_mib = rpc_mem['node_memtotal_mib']
        max_platform_reserved = node_memtotal_mib * 2 / 3
        if int(platform_reserved_mib) > max_platform_reserved:
            low_core = cutils.is_low_core_system(ihost, pecan.request.dbapi)
            required_platform_reserved = \
                cutils.get_required_platform_reserved_memory(
                    ihost, node.numa_node, low_core)
            msg_platform_over = (
                _("Platform reserved memory {} MiB on node {} "
                  "is not within range [{}, {}]").format(
                      int(platform_reserved_mib),
                      node.numa_node,
                      required_platform_reserved,
                      max_platform_reserved))

            if cutils.is_virtual() or cutils.is_virtual_compute(ihost):
                LOG.warn(msg_platform_over)
            else:
                raise wsme.exc.ClientSideError(msg_platform_over)

        # Check if it is within the total amount of memory
        mem_alloc = 0
        if vm_hugepages_nr_2M_pending:
            mem_alloc += int(vm_hugepages_nr_2M_pending) * 2
        elif rpc_mem['vm_hugepages_nr_2M']:
            mem_alloc += int(rpc_mem['vm_hugepages_nr_2M']) * 2
        if vm_hugepages_nr_1G_pending:
            mem_alloc += int(vm_hugepages_nr_1G_pending) * 1000
        elif rpc_mem['vm_hugepages_nr_1G']:
            mem_alloc += int(rpc_mem['vm_hugepages_nr_1G']) * 1000
        LOG.debug("vm total=%s" % (mem_alloc))

        vs_hp_size = rpc_mem['vswitch_hugepages_size_mib']
        vs_hp_nr = rpc_mem['vswitch_hugepages_nr']
        mem_alloc += vs_hp_size * vs_hp_nr
        LOG.debug("vs_hp_nr=%s vs_hp_size=%s" % (vs_hp_nr, vs_hp_size))
        LOG.debug("memTotal %s mem_alloc %s" % (node_memtotal_mib, mem_alloc))

        # Initial configuration defaults mem_alloc to consume 100% of 2M
        # pages, so we may marginally exceed available non-huge memory.
        # Note there will be some variability in total available memory,
        # so we need to allow some tolerance so we do not hit the limit.
        avail = node_memtotal_mib - mem_alloc
        delta = int(platform_reserved_mib) - avail
        mem_thresh = 32
        if int(platform_reserved_mib) > avail + mem_thresh:
            msg = (_("Platform reserved memory {} MiB exceeds {} MiB "
                     "available by {} MiB (2M: {} pages; 1G: {} pages). "
                     "total memory={} MiB, allocated={} MiB.").format(
                         platform_reserved_mib, avail,
                         delta, delta / 2, delta / 1024,
                         node_memtotal_mib, mem_alloc))
            raise wsme.exc.ClientSideError(msg)
        else:
            msg = (_("Platform reserved memory {} MiB, {} MiB available, "
                     "total memory={} MiB, allocated={} MiB.").format(
                         platform_reserved_mib, avail,
                         node_memtotal_mib, mem_alloc))
            LOG.info(msg)
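A worked instance of the two bounds checked above, with made-up numbers (pure Python, mirroring the accounting in _check_memory):

node_memtotal_mib = 96000            # total node memory
vm_hugepages_nr_2M = 20000           # 2M pages -> 40000 MiB
vswitch_hugepages_size_mib = 1024
vswitch_hugepages_nr = 2             # vswitch -> 2048 MiB
mem_thresh = 32                      # tolerance in MiB

mem_alloc = (vm_hugepages_nr_2M * 2 +
             vswitch_hugepages_size_mib * vswitch_hugepages_nr)  # 42048 MiB
avail = node_memtotal_mib - mem_alloc                            # 53952 MiB
max_platform_reserved = node_memtotal_mib * 2 // 3               # 64000 MiB

platform_reserved_mib = 10000
assert platform_reserved_mib <= max_platform_reserved   # 2/3 upper bound
assert platform_reserved_mib <= avail + mem_thresh      # fits in free memory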

def _check_huge_values(rpc_mem, patch, vm_hugepages_nr_2M=None,
                       vm_hugepages_nr_1G=None):

    if rpc_mem['vm_hugepages_use_1G'] == 'False' and vm_hugepages_nr_1G:
        # cannot provision 1G huge pages if the processor does not support
        # them
        raise wsme.exc.ClientSideError(_(
            "Processor does not support 1G huge pages."))

    # Check for invalid characters
    if vm_hugepages_nr_2M:
        try:
            val = int(vm_hugepages_nr_2M)
        except ValueError:
            raise wsme.exc.ClientSideError(_(
                "VM huge pages 2M must be a number"))
        if val < 0:
            raise wsme.exc.ClientSideError(_(
                "VM huge pages 2M must be greater than or equal to zero"))

    if vm_hugepages_nr_1G:
        try:
            val = int(vm_hugepages_nr_1G)
        except ValueError:
            raise wsme.exc.ClientSideError(_(
                "VM huge pages 1G must be a number"))
        if val < 0:
            raise wsme.exc.ClientSideError(_(
                "VM huge pages 1G must be greater than or equal to zero"))

    # Check to make sure that the huge pages aren't over committed
    if rpc_mem['vm_hugepages_possible_2M'] is None and vm_hugepages_nr_2M:
        raise wsme.exc.ClientSideError(_(
            "No available space for 2M huge page allocation"))

    if rpc_mem['vm_hugepages_possible_1G'] is None and vm_hugepages_nr_1G:
        raise wsme.exc.ClientSideError(_(
            "No available space for 1G huge page allocation"))

    # Update the number of available huge pages
    num_2M_for_1G = 512

    # None == unchanged
    if vm_hugepages_nr_1G is not None:
        new_1G_pages = int(vm_hugepages_nr_1G)
    elif rpc_mem['vm_hugepages_nr_1G_pending']:
        new_1G_pages = int(rpc_mem['vm_hugepages_nr_1G_pending'])
    elif rpc_mem['vm_hugepages_nr_1G']:
        new_1G_pages = int(rpc_mem['vm_hugepages_nr_1G'])
    else:
        new_1G_pages = 0

    # None == unchanged
    if vm_hugepages_nr_2M is not None:
        new_2M_pages = int(vm_hugepages_nr_2M)
    elif rpc_mem['vm_hugepages_nr_2M_pending']:
        new_2M_pages = int(rpc_mem['vm_hugepages_nr_2M_pending'])
    elif rpc_mem['vm_hugepages_nr_2M']:
        new_2M_pages = int(rpc_mem['vm_hugepages_nr_2M'])
    else:
        new_2M_pages = 0

    LOG.debug('new 2M pages: %s, 1G pages: %s' % (new_2M_pages, new_1G_pages))
    vm_possible_2M = 0
    vm_possible_1G = 0
    if rpc_mem['vm_hugepages_possible_2M']:
        vm_possible_2M = int(rpc_mem['vm_hugepages_possible_2M'])

    if rpc_mem['vm_hugepages_possible_1G']:
        vm_possible_1G = int(rpc_mem['vm_hugepages_possible_1G'])

    LOG.debug("max possible 2M pages: %s, max possible 1G pages: %s" %
              (vm_possible_2M, vm_possible_1G))

    if vm_possible_2M < new_2M_pages:
        msg = _("No available space for 2M huge page allocation, "
                "max 2M pages: %d") % vm_possible_2M
        raise wsme.exc.ClientSideError(msg)

    if vm_possible_1G < new_1G_pages:
        msg = _("No available space for 1G huge page allocation, "
                "max 1G pages: %d") % vm_possible_1G
        raise wsme.exc.ClientSideError(msg)

    # always use vm_possible_2M to compare
    if vm_possible_2M < (new_2M_pages + new_1G_pages * num_2M_for_1G):
        max_1G = int((vm_possible_2M - new_2M_pages) / num_2M_for_1G)
        max_2M = vm_possible_2M - new_1G_pages * num_2M_for_1G
        if new_2M_pages > 0 and new_1G_pages > 0:
            msg = _("No available space for new settings. "
                    "Max 1G pages is {} when 2M is {}, or "
                    "max 2M pages is {} when 1G is {}.").format(
                        max_1G, new_2M_pages, max_2M, new_1G_pages)
        elif new_1G_pages > 0:
            msg = _("No available space for 1G huge page allocation, "
                    "max 1G pages: %d") % vm_possible_1G
        else:
            msg = _("No available space for 2M huge page allocation, "
                    "max 2M pages: %d") % vm_possible_2M

        raise wsme.exc.ClientSideError(msg)

    return patch
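The 2M/1G trade-off enforced above draws both page sizes from a single budget expressed in 2M pages, at 512 x 2M per 1G page. A worked sketch with made-up numbers:

NUM_2M_FOR_1G = 512

def fits(vm_possible_2M, new_2M_pages, new_1G_pages):
    # True when the requested mix fits the common 2M-page budget.
    return vm_possible_2M >= new_2M_pages + new_1G_pages * NUM_2M_FOR_1G

vm_possible_2M = 20000                   # budget: 20000 x 2M pages
print(fits(vm_possible_2M, 4000, 30))    # True:  4000 + 15360 <= 20000
print(fits(vm_possible_2M, 6000, 30))    # False: 6000 + 15360 >  20000

# When it does not fit, the error reports the two trade-off maxima:
max_1G = (vm_possible_2M - 6000) // NUM_2M_FOR_1G   # 27 pages
max_2M = vm_possible_2M - 30 * NUM_2M_FOR_1G        # 4640 pages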
261 inventory/inventory/inventory/api/controllers/v1/node.py Normal file
@@ -0,0 +1,261 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

#
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#


import six

import pecan
from pecan import rest

from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import cpu
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import memory
from inventory.api.controllers.v1 import port
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory import objects

from oslo_log import log

LOG = log.getLogger(__name__)

class NodePatchType(types.JsonPatchType):

    @staticmethod
    def mandatory_attrs():
        return ['/address', '/host_uuid']


class Node(base.APIBase):
    """API representation of a host node.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of
    a node.
    """

    uuid = types.uuid
    "Unique UUID for this node"

    numa_node = int
    "numa node zone for this node"

    capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
                                                  six.integer_types)}
    "This node's meta data"

    host_id = int
    "The hostid that this node belongs to"

    host_uuid = types.uuid
    "The UUID of the host this node belongs to"

    links = [link.Link]
    "A list containing a self link and associated node links"

    icpus = [link.Link]
    "Links to the collection of cpus on this node"

    imemorys = [link.Link]
    "Links to the collection of memorys on this node"

    ports = [link.Link]
    "Links to the collection of ports on this node"
    def __init__(self, **kwargs):
        self.fields = objects.Node.fields.keys()
        for k in self.fields:
            setattr(self, k, kwargs.get(k))

    @classmethod
    def convert_with_links(cls, rpc_node, expand=True):
        minimum_fields = ['uuid', 'numa_node', 'capabilities',
                          'host_uuid', 'host_id',
                          'created_at'] if not expand else None
        fields = minimum_fields if not expand else None

        node = Node.from_rpc_object(rpc_node, fields)

        # never expose the host_id attribute
        node.host_id = wtypes.Unset

        node.links = [link.Link.make_link('self', pecan.request.host_url,
                                          'nodes', node.uuid),
                      link.Link.make_link('bookmark',
                                          pecan.request.host_url,
                                          'nodes', node.uuid,
                                          bookmark=True)
                      ]
        if expand:
            node.icpus = [link.Link.make_link('self',
                                              pecan.request.host_url,
                                              'nodes',
                                              node.uuid + "/cpus"),
                          link.Link.make_link('bookmark',
                                              pecan.request.host_url,
                                              'nodes',
                                              node.uuid + "/cpus",
                                              bookmark=True)
                          ]

            node.imemorys = [link.Link.make_link('self',
                                                 pecan.request.host_url,
                                                 'nodes',
                                                 node.uuid + "/memorys"),
                             link.Link.make_link('bookmark',
                                                 pecan.request.host_url,
                                                 'nodes',
                                                 node.uuid + "/memorys",
                                                 bookmark=True)
                             ]

            node.ports = [link.Link.make_link('self',
                                              pecan.request.host_url,
                                              'nodes',
                                              node.uuid + "/ports"),
                          link.Link.make_link('bookmark',
                                              pecan.request.host_url,
                                              'nodes',
                                              node.uuid + "/ports",
                                              bookmark=True)
                          ]

        return node

class NodeCollection(collection.Collection):
    """API representation of a collection of nodes."""

    nodes = [Node]
    "A list containing node objects"

    def __init__(self, **kwargs):
        self._type = 'nodes'

    @classmethod
    def convert_with_links(cls, rpc_nodes, limit, url=None,
                           expand=False, **kwargs):
        collection = NodeCollection()
        collection.nodes = [Node.convert_with_links(p, expand)
                            for p in rpc_nodes]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection


LOCK_NAME = 'NodeController'


class NodeController(rest.RestController):
    """REST controller for nodes."""

    icpus = cpu.CPUController(from_node=True)
    "Expose cpus as a sub-element of nodes"

    imemorys = memory.MemoryController(from_node=True)
    "Expose memorys as a sub-element of nodes"

    ports = port.PortController(from_node=True)
    "Expose ports as a sub-element of nodes"

    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self, from_hosts=False):
        self._from_hosts = from_hosts
    def _get_nodes_collection(self, host_uuid, marker, limit, sort_key,
                              sort_dir, expand=False, resource_url=None):
        if self._from_hosts and not host_uuid:
            raise exception.InvalidParameterValue(_(
                "Host id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.Node.get_by_uuid(pecan.request.context,
                                                  marker)

        if host_uuid:
            nodes = objects.Node.get_by_host(pecan.request.context,
                                             host_uuid,
                                             limit,
                                             marker=marker_obj,
                                             sort_key=sort_key,
                                             sort_dir=sort_dir)
        else:
            nodes = objects.Node.list(pecan.request.context,
                                      limit,
                                      marker=marker_obj,
                                      sort_key=sort_key,
                                      sort_dir=sort_dir)

        return NodeCollection.convert_with_links(nodes, limit,
                                                 url=resource_url,
                                                 expand=expand,
                                                 sort_key=sort_key,
                                                 sort_dir=sort_dir)
    @wsme_pecan.wsexpose(NodeCollection,
                         types.uuid, types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, host_uuid=None, marker=None, limit=None,
                sort_key='id', sort_dir='asc'):
        """Retrieve a list of nodes."""
        return self._get_nodes_collection(host_uuid, marker, limit,
                                          sort_key, sort_dir)
    @wsme_pecan.wsexpose(NodeCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, host_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of nodes with detail."""
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "nodes":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['nodes', 'detail'])
        return self._get_nodes_collection(host_uuid,
                                          marker, limit,
                                          sort_key, sort_dir,
                                          expand, resource_url)
    @wsme_pecan.wsexpose(Node, types.uuid)
    def get_one(self, node_uuid):
        """Retrieve information about the given node."""

        if self._from_hosts:
            raise exception.OperationNotPermitted

        rpc_node = objects.Node.get_by_uuid(pecan.request.context, node_uuid)
        return Node.convert_with_links(rpc_node)
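NodeController above attaches the cpu, memory and port controllers as class attributes, which is how pecan serves nested URLs such as /v1/nodes/<uuid>/memorys. A sketch of walking that hierarchy (the endpoint URL and token are assumptions, as in the earlier sketch):

import requests

INVENTORY_URL = 'http://controller:6380/v1'      # assumed endpoint
HEADERS = {'X-Auth-Token': '<keystone-token>'}   # assumed auth

nodes = requests.get(INVENTORY_URL + '/nodes', headers=HEADERS).json()
node_uuid = nodes['nodes'][0]['uuid']

# Sub-resource route served by `imemorys = memory.MemoryController(...)`.
memorys = requests.get(INVENTORY_URL + '/nodes/' + node_uuid + '/memorys',
                       headers=HEADERS).json()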
313 inventory/inventory/inventory/api/controllers/v1/pci_device.py Normal file
@@ -0,0 +1,313 @@
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


import jsonpatch
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import k_host
from inventory.common import utils as cutils
from inventory import objects
from oslo_log import log

LOG = log.getLogger(__name__)
class PCIDevicePatchType(types.JsonPatchType):

    @staticmethod
    def mandatory_attrs():
        return []


class PCIDevice(base.APIBase):
    """API representation of a PCI device

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a
    PCI device.
    """
    uuid = types.uuid
    "Unique UUID for this device"

    type = wtypes.text
    "Represent the type of device"

    name = wtypes.text
    "Represent the name of the device. Unique per host"

    pciaddr = wtypes.text
    "Represent the pci address of the device"

    pclass_id = wtypes.text
    "Represent the numerical pci class of the device"

    pvendor_id = wtypes.text
    "Represent the numerical pci vendor of the device"

    pdevice_id = wtypes.text
    "Represent the numerical pci device of the device"

    pclass = wtypes.text
    "Represent the pci class description of the device"

    pvendor = wtypes.text
    "Represent the pci vendor description of the device"

    pdevice = wtypes.text
    "Represent the pci device description of the device"

    psvendor = wtypes.text
    "Represent the pci svendor of the device"

    psdevice = wtypes.text
    "Represent the pci sdevice of the device"

    numa_node = int
    "Represent the numa node or zone of the device"

    sriov_totalvfs = int
    "The total number of available SR-IOV VFs"

    sriov_numvfs = int
    "The number of configured SR-IOV VFs"

    sriov_vfs_pci_address = wtypes.text
    "The PCI Addresses of the VFs"

    driver = wtypes.text
    "The kernel driver for this device"

    extra_info = wtypes.text
    "Extra information for this device"

    host_id = int
    "Represent the host_id the device belongs to"

    host_uuid = types.uuid
    "Represent the UUID of the host the device belongs to"

    enabled = types.boolean
    "Represent the enabled status of the device"

    links = [link.Link]
    "Represent a list containing a self link and associated device links"

    def __init__(self, **kwargs):
        self.fields = objects.PCIDevice.fields.keys()
        for k in self.fields:
            setattr(self, k, kwargs.get(k))
    @classmethod
    def convert_with_links(cls, rpc_device, expand=True):
        device = PCIDevice(**rpc_device.as_dict())
        if not expand:
            device.unset_fields_except(['uuid', 'host_id',
                                        'name', 'pciaddr', 'pclass_id',
                                        'pvendor_id', 'pdevice_id', 'pclass',
                                        'pvendor', 'pdevice', 'psvendor',
                                        'psdevice', 'numa_node',
                                        'sriov_totalvfs', 'sriov_numvfs',
                                        'sriov_vfs_pci_address', 'driver',
                                        'host_uuid', 'enabled',
                                        'created_at', 'updated_at'])

        # do not expose the id attribute
        device.host_id = wtypes.Unset
        device.node_id = wtypes.Unset

        device.links = [link.Link.make_link('self', pecan.request.host_url,
                                            'pci_devices', device.uuid),
                        link.Link.make_link('bookmark',
                                            pecan.request.host_url,
                                            'pci_devices', device.uuid,
                                            bookmark=True)
                        ]
        return device
|
||||||
|
|
||||||
|
|
||||||
|
class PCIDeviceCollection(collection.Collection):
|
||||||
|
"""API representation of a collection of PciDevice objects."""
|
||||||
|
|
||||||
|
pci_devices = [PCIDevice]
|
||||||
|
"A list containing PciDevice objects"
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
self._type = 'pci_devices'
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def convert_with_links(cls, rpc_devices, limit, url=None,
|
||||||
|
expand=False, **kwargs):
|
||||||
|
collection = PCIDeviceCollection()
|
||||||
|
collection.pci_devices = [PCIDevice.convert_with_links(d, expand)
|
||||||
|
for d in rpc_devices]
|
||||||
|
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||||
|
return collection
|
||||||
|
|
||||||
|
|
||||||
|
LOCK_NAME = 'PCIDeviceController'
|
||||||
|
|
||||||
|
|
||||||
|
class PCIDeviceController(rest.RestController):
|
||||||
|
"""REST controller for PciDevices."""
|
||||||
|
|
||||||
|
_custom_actions = {
|
||||||
|
'detail': ['GET'],
|
||||||
|
}
|
||||||
|
|
||||||
|
def __init__(self, from_hosts=False):
|
||||||
|
self._from_hosts = from_hosts
|
||||||
|
|
||||||
|
def _get_pci_devices_collection(self, uuid, marker, limit, sort_key,
|
||||||
|
sort_dir, expand=False, resource_url=None):
|
||||||
|
if self._from_hosts and not uuid:
|
||||||
|
raise exception.InvalidParameterValue(_(
|
||||||
|
"Host id not specified."))
|
||||||
|
|
||||||
|
limit = utils.validate_limit(limit)
|
||||||
|
sort_dir = utils.validate_sort_dir(sort_dir)
|
||||||
|
marker_obj = None
|
||||||
|
if marker:
|
||||||
|
marker_obj = objects.PCIDevice.get_by_uuid(
|
||||||
|
pecan.request.context,
|
||||||
|
marker)
|
||||||
|
if self._from_hosts:
|
||||||
|
# devices = pecan.request.dbapi.pci_device_get_by_host(
|
||||||
|
devices = objects.PCIDevice.get_by_host(
|
||||||
|
pecan.request.context,
|
||||||
|
uuid,
|
||||||
|
limit,
|
||||||
|
marker_obj,
|
||||||
|
sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
|
else:
|
||||||
|
if uuid:
|
||||||
|
# devices = pecan.request.dbapi.pci_device_get_by_host(
|
||||||
|
devices = objects.PCIDevice.get_by_host(
|
||||||
|
pecan.request.context,
|
||||||
|
uuid,
|
||||||
|
limit,
|
||||||
|
marker_obj,
|
||||||
|
sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
|
else:
|
||||||
|
# devices = pecan.request.dbapi.pci_device_get_list(
|
||||||
|
devices = objects.PCIDevice.list(
|
||||||
|
pecan.request.context,
|
||||||
|
limit,
|
||||||
|
marker_obj,
|
||||||
|
sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
|
|
||||||
|
return PCIDeviceCollection.convert_with_links(devices, limit,
|
||||||
|
url=resource_url,
|
||||||
|
expand=expand,
|
||||||
|
sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
|
|
||||||
|
@wsme_pecan.wsexpose(PCIDeviceCollection, types.uuid, types.uuid,
|
||||||
|
int, wtypes.text, wtypes.text)
|
||||||
|
def get_all(self, uuid=None, marker=None, limit=None,
|
||||||
|
sort_key='id', sort_dir='asc'):
|
||||||
|
"""Retrieve a list of devices."""
|
||||||
|
return self._get_pci_devices_collection(
|
||||||
|
uuid, marker, limit, sort_key, sort_dir)
|
||||||
|
|
||||||
|
@wsme_pecan.wsexpose(PCIDeviceCollection, types.uuid, types.uuid, int,
|
||||||
|
wtypes.text, wtypes.text)
|
||||||
|
def detail(self, uuid=None, marker=None, limit=None,
|
||||||
|
sort_key='id', sort_dir='asc'):
|
||||||
|
"""Retrieve a list of devices with detail."""
|
||||||
|
|
||||||
|
# NOTE: /detail should only work against collections
|
||||||
|
parent = pecan.request.path.split('/')[:-1][-1]
|
||||||
|
if parent != "pci_devices":
|
||||||
|
raise exception.HTTPNotFound
|
||||||
|
|
||||||
|
expand = True
|
||||||
|
resource_url = '/'.join(['pci_devices', 'detail'])
|
||||||
|
return self._get_pci_devices_collection(uuid, marker, limit, sort_key,
|
||||||
|
sort_dir, expand, resource_url)
|
||||||
|
|
||||||
|
@wsme_pecan.wsexpose(PCIDevice, types.uuid)
|
||||||
|
def get_one(self, device_uuid):
|
||||||
|
"""Retrieve information about the given device."""
|
||||||
|
if self._from_hosts:
|
||||||
|
raise exception.OperationNotPermitted
|
||||||
|
|
||||||
|
rpc_device = objects.PCIDevice.get_by_uuid(
|
||||||
|
pecan.request.context, device_uuid)
|
||||||
|
return PCIDevice.convert_with_links(rpc_device)
|
||||||
|
|
||||||
|
@cutils.synchronized(LOCK_NAME)
|
||||||
|
@wsme.validate(types.uuid, [PCIDevicePatchType])
|
||||||
|
@wsme_pecan.wsexpose(PCIDevice, types.uuid,
|
||||||
|
body=[PCIDevicePatchType])
|
||||||
|
def patch(self, device_uuid, patch):
|
||||||
|
"""Update an existing device."""
|
||||||
|
if self._from_hosts:
|
||||||
|
raise exception.OperationNotPermitted
|
||||||
|
|
||||||
|
rpc_device = objects.PCIDevice.get_by_uuid(
|
||||||
|
pecan.request.context, device_uuid)
|
||||||
|
|
||||||
|
# replace host_uuid and with corresponding
|
||||||
|
patch_obj = jsonpatch.JsonPatch(patch)
|
||||||
|
for p in patch_obj:
|
||||||
|
if p['path'] == '/host_uuid':
|
||||||
|
p['path'] = '/host_id'
|
||||||
|
host = objects.Host.get_by_uuid(pecan.request.context,
|
||||||
|
p['value'])
|
||||||
|
p['value'] = host.id
|
||||||
|
|
||||||
|
try:
|
||||||
|
device = PCIDevice(**jsonpatch.apply_patch(rpc_device.as_dict(),
|
||||||
|
patch_obj))
|
||||||
|
|
||||||
|
except utils.JSONPATCH_EXCEPTIONS as e:
|
||||||
|
raise exception.PatchError(patch=patch, reason=e)
|
||||||
|
|
||||||
|
# Semantic checks
|
||||||
|
host = objects.Host.get_by_uuid(pecan.request.context,
|
||||||
|
device.host_id)
|
||||||
|
_check_host(host)
|
||||||
|
|
||||||
|
# Update fields that have changed
|
||||||
|
for field in objects.PCIDevice.fields:
|
||||||
|
if rpc_device[field] != getattr(device, field):
|
||||||
|
_check_field(field)
|
||||||
|
rpc_device[field] = getattr(device, field)
|
||||||
|
|
||||||
|
rpc_device.save()
|
||||||
|
return PCIDevice.convert_with_links(rpc_device)
|
||||||
|
|
||||||
|
|
||||||
|
def _check_host(host):
|
||||||
|
if utils.is_aio_simplex_host_unlocked(host):
|
||||||
|
raise wsme.exc.ClientSideError(_('Host must be locked.'))
|
||||||
|
elif host.administrative != k_host.ADMIN_LOCKED and not \
|
||||||
|
utils.is_host_simplex_controller(host):
|
||||||
|
raise wsme.exc.ClientSideError(_('Host must be locked.'))
|
||||||
|
if k_host.COMPUTE not in host.subfunctions:
|
||||||
|
raise wsme.exc.ClientSideError(
|
||||||
|
_('Can only modify compute node cores.'))
|
||||||
|
|
||||||
|
|
||||||
|
def _check_field(field):
|
||||||
|
if field not in ["enabled", "name"]:
|
||||||
|
raise wsme.exc.ClientSideError(
|
||||||
|
_('Modifying %s attribute restricted') % field)
|
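A minimal client-side sketch of a device modify request against the PCIDeviceController.patch() handler above. This is illustrative only and not part of the commit: the endpoint URL, port number, and token handling are assumptions, and deployments should substitute their own values.

import json

import requests

INVENTORY_URL = 'http://127.0.0.1:6380/v1'  # hypothetical address and port
TOKEN = 'REPLACE_WITH_KEYSTONE_TOKEN'       # hypothetical auth token


def modify_pci_device(device_uuid, name=None, enabled=None):
    # Build an RFC 6902 JSON patch; patch() only permits the
    # 'name' and 'enabled' fields (see _check_field above).
    patch = []
    if name is not None:
        patch.append({'op': 'replace', 'path': '/name', 'value': name})
    if enabled is not None:
        patch.append({'op': 'replace', 'path': '/enabled', 'value': enabled})
    resp = requests.patch(
        '%s/pci_devices/%s' % (INVENTORY_URL, device_uuid),
        headers={'Content-Type': 'application/json',
                 'X-Auth-Token': TOKEN},
        data=json.dumps(patch))
    resp.raise_for_status()
    return resp.json()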
334 inventory/inventory/inventory/api/controllers/v1/port.py (Normal file)
@@ -0,0 +1,334 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#


import six

import pecan
from pecan import rest

from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import lldp_agent
from inventory.api.controllers.v1 import lldp_neighbour
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory import objects

from oslo_log import log

LOG = log.getLogger(__name__)


class PortPatchType(types.JsonPatchType):

    @staticmethod
    def mandatory_attrs():
        return []


class Port(base.APIBase):
    """API representation of a host port

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a
    port.
    """
    uuid = types.uuid
    "Unique UUID for this port"

    type = wtypes.text
    "Represent the type of port"

    name = wtypes.text
    "Represent the name of the port. Unique per host"

    namedisplay = wtypes.text
    "Represent the display name of the port. Unique per host"

    pciaddr = wtypes.text
    "Represent the pci address of the port"

    dev_id = int
    "The unique identifier of the PCI device"

    pclass = wtypes.text
    "Represent the pci class of the port"

    pvendor = wtypes.text
    "Represent the pci vendor of the port"

    pdevice = wtypes.text
    "Represent the pci device of the port"

    psvendor = wtypes.text
    "Represent the pci svendor of the port"

    psdevice = wtypes.text
    "Represent the pci sdevice of the port"

    numa_node = int
    "Represent the numa node or zone of the port"

    sriov_totalvfs = int
    "The total number of available SR-IOV VFs"

    sriov_numvfs = int
    "The number of configured SR-IOV VFs"

    sriov_vfs_pci_address = wtypes.text
    "The PCI Addresses of the VFs"

    driver = wtypes.text
    "The kernel driver for this device"

    capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
                                                  six.integer_types)}
    "Represent meta data of the port"

    host_id = int
    "Represent the host_id the port belongs to"

    interface_id = int
    "Represent the interface_id the port belongs to"

    dpdksupport = bool
    "Represent whether or not the port supports DPDK acceleration"

    host_uuid = types.uuid
    "Represent the UUID of the host the port belongs to"

    interface_uuid = types.uuid
    "Represent the UUID of the interface the port belongs to"

    node_uuid = types.uuid
    "Represent the UUID of the node the port belongs to"

    links = [link.Link]
    "Represent a list containing a self link and associated port links"

    lldp_agents = [link.Link]
    "Links to the collection of LldpAgents on this port"

    lldp_neighbours = [link.Link]
    "Links to the collection of LldpNeighbours on this port"

    def __init__(self, **kwargs):
        self.fields = objects.Port.fields.keys()
        for k in self.fields:
            setattr(self, k, kwargs.get(k))

    @classmethod
    def convert_with_links(cls, rpc_port, expand=True):
        port = Port(**rpc_port.as_dict())
        if not expand:
            port.unset_fields_except(['uuid', 'host_id', 'node_id',
                                      'interface_id', 'type', 'name',
                                      'namedisplay', 'pciaddr', 'dev_id',
                                      'pclass', 'pvendor', 'pdevice',
                                      'psvendor', 'psdevice', 'numa_node',
                                      'sriov_totalvfs', 'sriov_numvfs',
                                      'sriov_vfs_pci_address', 'driver',
                                      'capabilities',
                                      'host_uuid', 'interface_uuid',
                                      'node_uuid', 'dpdksupport',
                                      'created_at', 'updated_at'])

        # never expose the id attribute
        port.host_id = wtypes.Unset
        port.interface_id = wtypes.Unset
        port.node_id = wtypes.Unset

        port.links = [link.Link.make_link('self', pecan.request.host_url,
                                          'ports', port.uuid),
                      link.Link.make_link('bookmark',
                                          pecan.request.host_url,
                                          'ports', port.uuid,
                                          bookmark=True)
                      ]

        port.lldp_agents = [link.Link.make_link('self',
                                                pecan.request.host_url,
                                                'ports',
                                                port.uuid + "/lldp_agents"),
                            link.Link.make_link('bookmark',
                                                pecan.request.host_url,
                                                'ports',
                                                port.uuid + "/lldp_agents",
                                                bookmark=True)
                            ]

        port.lldp_neighbours = [
            link.Link.make_link('self',
                                pecan.request.host_url,
                                'ports',
                                port.uuid + "/lldp_neighbors"),
            link.Link.make_link('bookmark',
                                pecan.request.host_url,
                                'ports',
                                port.uuid + "/lldp_neighbors",
                                bookmark=True)
        ]

        return port


class PortCollection(collection.Collection):
    """API representation of a collection of Port objects."""

    ports = [Port]
    "A list containing Port objects"

    def __init__(self, **kwargs):
        self._type = 'ports'

    @classmethod
    def convert_with_links(cls, rpc_ports, limit, url=None,
                           expand=False, **kwargs):
        collection = PortCollection()
        collection.ports = [Port.convert_with_links(p, expand)
                            for p in rpc_ports]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection


class PortController(rest.RestController):
    """REST controller for Ports."""

    lldp_agents = lldp_agent.LLDPAgentController(
        from_ports=True)
    "Expose lldp_agents as a sub-element of ports"

    lldp_neighbours = lldp_neighbour.LLDPNeighbourController(
        from_ports=True)
    "Expose lldp_neighbours as a sub-element of ports"

    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self, from_hosts=False, from_iinterface=False,
                 from_node=False):
        self._from_hosts = from_hosts
        self._from_iinterface = from_iinterface
        self._from_node = from_node

    def _get_ports_collection(self, uuid, interface_uuid, node_uuid,
                              marker, limit, sort_key, sort_dir,
                              expand=False, resource_url=None):

        if self._from_hosts and not uuid:
            raise exception.InvalidParameterValue(_(
                "Host id not specified."))

        if self._from_iinterface and not uuid:
            raise exception.InvalidParameterValue(_(
                "Interface id not specified."))

        if self._from_node and not uuid:
            raise exception.InvalidParameterValue(_(
                "Node id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.Port.get_by_uuid(
                pecan.request.context,
                marker)

        if self._from_hosts:
            ports = objects.Port.get_by_host(
                pecan.request.context,
                uuid, limit,
                marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
        elif self._from_node:
            ports = objects.Port.get_by_numa_node(
                pecan.request.context,
                uuid, limit,
                marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
        else:
            if uuid and not interface_uuid:
                ports = objects.Port.get_by_host(
                    pecan.request.context,
                    uuid, limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)
            else:
                ports = objects.Port.list(
                    pecan.request.context,
                    limit, marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)

        return PortCollection.convert_with_links(ports, limit,
                                                 url=resource_url,
                                                 expand=expand,
                                                 sort_key=sort_key,
                                                 sort_dir=sort_dir)

    @wsme_pecan.wsexpose(PortCollection, types.uuid, types.uuid,
                         types.uuid, types.uuid, int, wtypes.text,
                         wtypes.text)
    def get_all(self, uuid=None, interface_uuid=None, node_uuid=None,
                marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of ports."""

        return self._get_ports_collection(uuid,
                                          interface_uuid,
                                          node_uuid,
                                          marker, limit, sort_key, sort_dir)

    @wsme_pecan.wsexpose(PortCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of ports with detail."""

        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "ports":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['ports', 'detail'])
        # interface_uuid and node_uuid do not apply to the detail listing
        return self._get_ports_collection(uuid, None, None,
                                          marker, limit, sort_key,
                                          sort_dir, expand, resource_url)

    @wsme_pecan.wsexpose(Port, types.uuid)
    def get_one(self, port_uuid):
        """Retrieve information about the given port."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        rpc_port = objects.Port.get_by_uuid(
            pecan.request.context, port_uuid)
        return Port.convert_with_links(rpc_port)
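A hedged usage sketch of the marker/limit pagination contract implemented by _get_ports_collection() above, walking the full ports collection page by page. The endpoint URL and token are assumptions, not defined by this commit, and the presence of a 'next' key in the response mirrors collection.get_next() behaviour.

import requests

INVENTORY_URL = 'http://127.0.0.1:6380/v1'  # hypothetical address and port
TOKEN = 'REPLACE_WITH_KEYSTONE_TOKEN'       # hypothetical auth token


def iterate_ports(page_size=50):
    # The last UUID of each page becomes the marker for the next request;
    # an absent or empty 'next' link means the collection is exhausted.
    marker = None
    while True:
        params = {'limit': page_size, 'sort_key': 'name', 'sort_dir': 'asc'}
        if marker:
            params['marker'] = marker
        resp = requests.get('%s/ports' % INVENTORY_URL,
                            params=params,
                            headers={'X-Auth-Token': TOKEN})
        resp.raise_for_status()
        body = resp.json()
        for port in body['ports']:
            yield port
        if not body.get('next') or not body['ports']:
            break
        marker = body['ports'][-1]['uuid']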
168 inventory/inventory/inventory/api/controllers/v1/query.py (Normal file)
@@ -0,0 +1,168 @@
# coding: utf-8
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright © 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


import ast
import functools
import inspect
from inventory.common.i18n import _
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import timeutils
import six
import wsme
from wsme import types as wtypes

LOG = log.getLogger(__name__)

operation_kind = wtypes.Enum(str, 'lt', 'le', 'eq', 'ne', 'ge', 'gt')


class _Base(wtypes.Base):

    @classmethod
    def from_db_model(cls, m):
        return cls(**(m.as_dict()))

    @classmethod
    def from_db_and_links(cls, m, links):
        return cls(links=links, **(m.as_dict()))

    def as_dict(self, db_model):
        valid_keys = inspect.getargspec(db_model.__init__)[0]
        if 'self' in valid_keys:
            valid_keys.remove('self')
        return self.as_dict_from_keys(valid_keys)

    def as_dict_from_keys(self, keys):
        return dict((k, getattr(self, k))
                    for k in keys
                    if hasattr(self, k) and
                    getattr(self, k) != wsme.Unset)


class Query(_Base):
    """Query filter."""

    # The data types supported by the query.
    _supported_types = ['integer', 'float', 'string', 'boolean']

    # Functions to convert the data field to the correct type.
    _type_converters = {'integer': int,
                        'float': float,
                        'boolean': functools.partial(
                            strutils.bool_from_string, strict=True),
                        'string': six.text_type,
                        'datetime': timeutils.parse_isotime}

    _op = None  # provide a default

    def get_op(self):
        return self._op or 'eq'

    def set_op(self, value):
        self._op = value

    field = wtypes.text
    "The name of the field to test"

    # op = wsme.wsattr(operation_kind, default='eq')
    # this ^ doesn't seem to work.
    op = wsme.wsproperty(operation_kind, get_op, set_op)
    "The comparison operator. Defaults to 'eq'."

    value = wtypes.text
    "The value to compare against the stored data"

    type = wtypes.text
    "The data type of value to compare against the stored data"

    def __repr__(self):
        # for logging calls
        return '<Query %r %s %r %s>' % (self.field,
                                        self.op,
                                        self.value,
                                        self.type)

    @classmethod
    def sample(cls):
        return cls(field='resource_id',
                   op='eq',
                   value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
                   type='string'
                   )

    def as_dict(self):
        return self.as_dict_from_keys(['field', 'op', 'type', 'value'])

    def _get_value_as_type(self, forced_type=None):
        """Convert metadata value to the specified data type.

        This method is called during metadata query to help convert the
        querying metadata to the data type specified by the user. If no
        data type is given, the metadata is parsed by ast.literal_eval to
        attempt a smart conversion.

        NOTE (flwang): Using "_" as prefix to avoid an InvocationError raised
        from wsmeext/sphinxext.py. It's OK to call it outside the Query class.
        Because the "public" side of that class is actually the outside of the
        API, and the "private" side is the API implementation. The method is
        only used in the API implementation, so it's OK.

        :returns: metadata value converted with the specified data type.
        """
        type = forced_type or self.type
        try:
            converted_value = self.value
            if not type:
                try:
                    converted_value = ast.literal_eval(self.value)
                except (ValueError, SyntaxError):
                    msg = _('Failed to convert the metadata value %s'
                            ' automatically') % (self.value)
                    LOG.debug(msg)
            else:
                if type not in self._supported_types:
                    # Types must be explicitly declared so the
                    # correct type converter may be used. Subclasses
                    # of Query may define _supported_types and
                    # _type_converters to define their own types.
                    raise TypeError()
                converted_value = self._type_converters[type](self.value)
        except ValueError:
            msg = _('Failed to convert the value %(value)s'
                    ' to the expected data type %(type)s.') % \
                {'value': self.value, 'type': type}
            raise wsme.exc.ClientSideError(msg)
        except TypeError:
            msg = _('The data type %(type)s is not supported. The supported'
                    ' data type list is: %(supported)s') % \
                {'type': type, 'supported': self._supported_types}
            raise wsme.exc.ClientSideError(msg)
        except Exception:
            msg = _('Unexpected exception converting %(value)s to'
                    ' the expected data type %(type)s.') % \
                {'value': self.value, 'type': type}
            raise wsme.exc.ClientSideError(msg)
        return converted_value
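A short sketch of the Query type-conversion behaviour above, assuming the inventory package and its wsme/oslo dependencies are importable (for example in a tox unit-test environment); this is illustrative and not part of the commit.

from inventory.api.controllers.v1.query import Query

# 'boolean' is converted with strutils.bool_from_string(strict=True),
# so 'true'/'True'/'1'/'yes' all map to True.
q = Query(field='enabled', op='eq', value='true', type='boolean')
assert q._get_value_as_type() is True

# With no type given, ast.literal_eval attempts a best-effort conversion.
q = Query(field='numa_node', op='eq', value='1')
assert q._get_value_as_type() == 1

# An undeclared type name raises wsme.exc.ClientSideError via TypeError.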
586 inventory/inventory/inventory/api/controllers/v1/sensor.py (Normal file)
@@ -0,0 +1,586 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import copy
import jsonpatch
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import constants
from inventory.common import exception
from inventory.common import hwmon_api
from inventory.common.i18n import _
from inventory.common import k_host
from inventory.common import utils as cutils
from inventory import objects
from oslo_log import log


LOG = log.getLogger(__name__)


class SensorPatchType(types.JsonPatchType):
    @staticmethod
    def mandatory_attrs():
        return []


class Sensor(base.APIBase):
    """API representation of a Sensor

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a
    sensor.
    """

    uuid = types.uuid
    "Unique UUID for this sensor"

    sensorname = wtypes.text
    "Represent the name of the sensor. Unique with path per host"

    path = wtypes.text
    "Represent the path of the sensor. Unique with sensorname per host"

    sensortype = wtypes.text
    "Represent the type of sensor. e.g. Temperature, WatchDog"

    datatype = wtypes.text
    "Represent the entity monitored. e.g. discrete, analog"

    status = wtypes.text
    "Represent current sensor status: ok, minor, major, critical, disabled"

    state = wtypes.text
    "Represent the current state of the sensor"

    state_requested = wtypes.text
    "Represent the requested state of the sensor"

    audit_interval = int
    "Represent the audit_interval of the sensor."

    algorithm = wtypes.text
    "Represent the algorithm of the sensor."

    actions_minor = wtypes.text
    "Represent the minor configured actions of the sensor. CSV."

    actions_major = wtypes.text
    "Represent the major configured actions of the sensor. CSV."

    actions_critical = wtypes.text
    "Represent the critical configured actions of the sensor. CSV."

    suppress = wtypes.text
    "Suppress the sensor if True, otherwise do not suppress the sensor"

    value = wtypes.text
    "Represent current value of the discrete sensor"

    unit_base = wtypes.text
    "Represent the unit base of the analog sensor e.g. revolutions"

    unit_modifier = wtypes.text
    "Represent the unit modifier of the analog sensor e.g. 10**2"

    unit_rate = wtypes.text
    "Represent the unit rate of the sensor e.g. /minute"

    t_minor_lower = wtypes.text
    "Represent the minor lower threshold of the analog sensor"

    t_minor_upper = wtypes.text
    "Represent the minor upper threshold of the analog sensor"

    t_major_lower = wtypes.text
    "Represent the major lower threshold of the analog sensor"

    t_major_upper = wtypes.text
    "Represent the major upper threshold of the analog sensor"

    t_critical_lower = wtypes.text
    "Represent the critical lower threshold of the analog sensor"

    t_critical_upper = wtypes.text
    "Represent the critical upper threshold of the analog sensor"

    capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
                                                  six.integer_types)}
    "Represent meta data of the sensor"

    host_id = int
    "Represent the host_id the sensor belongs to"

    sensorgroup_id = int
    "Represent the sensorgroup_id the sensor belongs to"

    host_uuid = types.uuid
    "Represent the UUID of the host the sensor belongs to"

    sensorgroup_uuid = types.uuid
    "Represent the UUID of the sensorgroup the sensor belongs to"

    links = [link.Link]
    "Represent a list containing a self link and associated sensor links"

    def __init__(self, **kwargs):
        self.fields = objects.Sensor.fields.keys()
        for k in self.fields:
            setattr(self, k, kwargs.get(k))

    @classmethod
    def convert_with_links(cls, rpc_sensor, expand=True):

        sensor = Sensor(**rpc_sensor.as_dict())

        sensor_fields_common = ['uuid', 'host_id', 'sensorgroup_id',
                                'sensortype', 'datatype',
                                'sensorname', 'path',
                                'status',
                                'state', 'state_requested',
                                'sensor_action_requested',
                                'actions_minor',
                                'actions_major',
                                'actions_critical',
                                'suppress',
                                'audit_interval',
                                'algorithm',
                                'capabilities',
                                'host_uuid', 'sensorgroup_uuid',
                                'created_at', 'updated_at', ]

        sensor_fields_analog = ['unit_base',
                                'unit_modifier',
                                'unit_rate',
                                't_minor_lower',
                                't_minor_upper',
                                't_major_lower',
                                't_major_upper',
                                't_critical_lower',
                                't_critical_upper', ]

        if rpc_sensor.datatype == 'discrete':
            sensor_fields = sensor_fields_common
        elif rpc_sensor.datatype == 'analog':
            sensor_fields = sensor_fields_common + sensor_fields_analog
        else:
            LOG.error(_("Invalid datatype={}").format(rpc_sensor.datatype))

        if not expand:
            sensor.unset_fields_except(sensor_fields)

        # never expose the id attribute
        sensor.host_id = wtypes.Unset
        sensor.sensorgroup_id = wtypes.Unset

        sensor.links = [link.Link.make_link('self', pecan.request.host_url,
                                            'sensors', sensor.uuid),
                        link.Link.make_link('bookmark',
                                            pecan.request.host_url,
                                            'sensors', sensor.uuid,
                                            bookmark=True)
                        ]
        return sensor


class SensorCollection(collection.Collection):
    """API representation of a collection of Sensor objects."""

    sensors = [Sensor]
    "A list containing Sensor objects"

    def __init__(self, **kwargs):
        self._type = 'sensors'

    @classmethod
    def convert_with_links(cls, rpc_sensors, limit, url=None,
                           expand=False, **kwargs):
        collection = SensorCollection()
        collection.sensors = [Sensor.convert_with_links(p, expand)
                              for p in rpc_sensors]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection


LOCK_NAME = 'SensorController'


class SensorController(rest.RestController):
    """REST controller for Sensors."""

    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self, from_hosts=False, from_sensorgroup=False):
        self._from_hosts = from_hosts
        self._from_sensorgroup = from_sensorgroup
        self._api_token = None
        self._hwmon_address = k_host.LOCALHOST_HOSTNAME
        self._hwmon_port = constants.HWMON_PORT

    def _get_sensors_collection(self, uuid, sensorgroup_uuid,
                                marker, limit, sort_key, sort_dir,
                                expand=False, resource_url=None):

        if self._from_hosts and not uuid:
            raise exception.InvalidParameterValue(_(
                "Host id not specified."))

        if self._from_sensorgroup and not uuid:
            raise exception.InvalidParameterValue(_(
                "SensorGroup id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.Sensor.get_by_uuid(
                pecan.request.context,
                marker)

        if self._from_hosts:
            sensors = pecan.request.dbapi.sensor_get_by_host(
                uuid, limit,
                marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
            LOG.debug("dbapi.sensor_get_by_host=%s" % sensors)
        elif self._from_sensorgroup:
            sensors = pecan.request.dbapi.sensor_get_by_sensorgroup(
                uuid,
                limit,
                marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
            LOG.debug("dbapi.sensor_get_by_sensorgroup=%s" % sensors)
        else:
            if uuid and not sensorgroup_uuid:
                sensors = pecan.request.dbapi.sensor_get_by_host(
                    uuid, limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)
                LOG.debug("dbapi.sensor_get_by_host=%s" % sensors)
            elif uuid and sensorgroup_uuid:  # Need ihost_uuid ?
                sensors = pecan.request.dbapi.sensor_get_by_host_sensorgroup(
                    uuid,
                    sensorgroup_uuid,
                    limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)
                LOG.debug("dbapi.sensor_get_by_host_sensorgroup=%s" %
                          sensors)

            elif sensorgroup_uuid:  # Need ihost_uuid ?
                sensors = pecan.request.dbapi.sensor_get_by_host_sensorgroup(
                    uuid,  # None
                    sensorgroup_uuid,
                    limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)

            else:
                sensors = pecan.request.dbapi.sensor_get_list(
                    limit, marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)

        return SensorCollection.convert_with_links(sensors, limit,
                                                   url=resource_url,
                                                   expand=expand,
                                                   sort_key=sort_key,
                                                   sort_dir=sort_dir)

    @wsme_pecan.wsexpose(SensorCollection, types.uuid, types.uuid,
                         types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, uuid=None, sensorgroup_uuid=None,
                marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of sensors."""

        return self._get_sensors_collection(uuid, sensorgroup_uuid,
                                            marker, limit,
                                            sort_key, sort_dir)

    @wsme_pecan.wsexpose(SensorCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of sensors with detail."""

        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "sensors":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['sensors', 'detail'])
        # sensorgroup_uuid does not apply to the detail listing
        return self._get_sensors_collection(uuid, None, marker, limit,
                                            sort_key, sort_dir,
                                            expand, resource_url)

    @wsme_pecan.wsexpose(Sensor, types.uuid)
    def get_one(self, sensor_uuid):
        """Retrieve information about the given sensor."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        rpc_sensor = objects.Sensor.get_by_uuid(
            pecan.request.context, sensor_uuid)

        if rpc_sensor.datatype == 'discrete':
            rpc_sensor = objects.SensorDiscrete.get_by_uuid(
                pecan.request.context, sensor_uuid)
        elif rpc_sensor.datatype == 'analog':
            rpc_sensor = objects.SensorAnalog.get_by_uuid(
                pecan.request.context, sensor_uuid)
        else:
            LOG.error(_("Invalid datatype={}").format(rpc_sensor.datatype))

        return Sensor.convert_with_links(rpc_sensor)

    @staticmethod
    def _new_sensor_semantic_checks(sensor):
        datatype = sensor.as_dict().get('datatype') or ""
        sensortype = sensor.as_dict().get('sensortype') or ""
        if not (datatype and sensortype):
            raise wsme.exc.ClientSideError(_("sensor-add: Cannot "
                                             "add a sensor "
                                             "without a valid datatype "
                                             "and sensortype."))

        if datatype not in constants.SENSOR_DATATYPE_VALID_LIST:
            raise wsme.exc.ClientSideError(
                _("sensor datatype must be one of %s.") %
                constants.SENSOR_DATATYPE_VALID_LIST)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(Sensor, body=Sensor)
    def post(self, sensor):
        """Create a new sensor."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        self._new_sensor_semantic_checks(sensor)
        try:
            ihost = pecan.request.dbapi.host_get(sensor.host_uuid)

            if hasattr(sensor, 'datatype'):
                if sensor.datatype == 'discrete':
                    new_sensor = pecan.request.dbapi.sensor_discrete_create(
                        ihost.id, sensor.as_dict())
                elif sensor.datatype == 'analog':
                    new_sensor = pecan.request.dbapi.sensor_analog_create(
                        ihost.id, sensor.as_dict())
                else:
                    raise wsme.exc.ClientSideError(
                        _("Invalid datatype. {}").format(sensor.datatype))
            else:
                raise wsme.exc.ClientSideError(_("Unspecified datatype."))

        except exception.InventoryException as e:
            LOG.exception(e)
            raise wsme.exc.ClientSideError(_("Invalid data"))
        return sensor.convert_with_links(new_sensor)

    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [SensorPatchType])
    @wsme_pecan.wsexpose(Sensor, types.uuid,
                         body=[SensorPatchType])
    def patch(self, sensor_uuid, patch):
        """Update an existing sensor."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        rpc_sensor = objects.Sensor.get_by_uuid(pecan.request.context,
                                                sensor_uuid)
        if rpc_sensor.datatype == 'discrete':
            rpc_sensor = objects.SensorDiscrete.get_by_uuid(
                pecan.request.context, sensor_uuid)
        elif rpc_sensor.datatype == 'analog':
            rpc_sensor = objects.SensorAnalog.get_by_uuid(
                pecan.request.context, sensor_uuid)
        else:
            raise wsme.exc.ClientSideError(_("Invalid datatype={}").format(
                rpc_sensor.datatype))

        rpc_sensor_orig = copy.deepcopy(rpc_sensor)

        # replace host_uuid and sensorgroup_uuid with the corresponding ids
        utils.validate_patch(patch)
        patch_obj = jsonpatch.JsonPatch(patch)
        my_host_uuid = None
        for p in patch_obj:
            if p['path'] == '/host_uuid':
                p['path'] = '/host_id'
                host = objects.Host.get_by_uuid(pecan.request.context,
                                                p['value'])
                p['value'] = host.id
                my_host_uuid = host.uuid

            if p['path'] == '/sensorgroup_uuid':
                p['path'] = '/sensorgroup_id'
                try:
                    sensorgroup = objects.sensorgroup.get_by_uuid(
                        pecan.request.context, p['value'])
                    p['value'] = sensorgroup.id
                    LOG.info("sensorgroup_uuid=%s id=%s" % (p['value'],
                                                            sensorgroup.id))
                except exception.InventoryException:
                    p['value'] = None

        try:
            sensor = Sensor(**jsonpatch.apply_patch(rpc_sensor.as_dict(),
                                                    patch_obj))

        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        if rpc_sensor.datatype == 'discrete':
            fields = objects.SensorDiscrete.fields
        else:
            fields = objects.SensorAnalog.fields

        for field in fields:
            if rpc_sensor[field] != getattr(sensor, field):
                rpc_sensor[field] = getattr(sensor, field)

        delta = rpc_sensor.obj_what_changed()
        sensor_suppress_attrs = ['suppress']
        force_action = False
        if any(x in delta for x in sensor_suppress_attrs):
            valid_suppress = ['True', 'False', 'true', 'false',
                              'force_action']
            if rpc_sensor.suppress.lower() not in valid_suppress:
                raise wsme.exc.ClientSideError(_("Invalid suppress value, "
                                                 "select 'True' or 'False'"))
            elif rpc_sensor.suppress.lower() == 'force_action':
                LOG.info("suppress=%s" % rpc_sensor.suppress.lower())
                rpc_sensor.suppress = rpc_sensor_orig.suppress
                force_action = True

        self._semantic_modifiable_fields(patch_obj, force_action)

        if not pecan.request.user_agent.startswith('hwmon'):
            hwmon_sensor = cutils.removekeys_nonhwmon(
                rpc_sensor.as_dict())

            if not my_host_uuid:
                host = objects.Host.get_by_uuid(pecan.request.context,
                                                rpc_sensor.host_id)
                my_host_uuid = host.uuid
                LOG.warn("Missing host_uuid updated=%s" % my_host_uuid)

            hwmon_sensor.update({'host_uuid': my_host_uuid})

            hwmon_response = hwmon_api.sensor_modify(
                self._api_token, self._hwmon_address, self._hwmon_port,
                hwmon_sensor,
                constants.HWMON_DEFAULT_TIMEOUT_IN_SECS)

            if not hwmon_response:
                hwmon_response = {'status': 'fail',
                                  'reason': 'no response',
                                  'action': 'retry'}

            if hwmon_response['status'] != 'pass':
                msg = _("HWMON has returned with a status of {}, reason: {}, "
                        "recommended action: {}").format(
                    hwmon_response.get('status'),
                    hwmon_response.get('reason'),
                    hwmon_response.get('action'))

                if force_action:
                    LOG.error(msg)
                else:
                    raise wsme.exc.ClientSideError(msg)

        rpc_sensor.save()

        return Sensor.convert_with_links(rpc_sensor)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, sensor_uuid):
        """Delete a sensor."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        pecan.request.dbapi.sensor_destroy(sensor_uuid)

    @staticmethod
    def _semantic_modifiable_fields(patch_obj, force_action=False):
        # Prevent auto-populated fields from being updated
        state_rel_path = ['/uuid', '/id', '/host_id', '/datatype',
                          '/sensortype']
        if any(p['path'] in state_rel_path for p in patch_obj):
            raise wsme.exc.ClientSideError(
                _("The following fields can not be "
                  "modified: %s ") % state_rel_path)

        state_rel_path = ['/actions_critical',
                          '/actions_major',
                          '/actions_minor']
        if any(p['path'] in state_rel_path for p in patch_obj):
            raise wsme.exc.ClientSideError(
                _("The following fields can only be modified at the "
                  "sensorgroup level: %s") % state_rel_path)

        if not (pecan.request.user_agent.startswith('hwmon') or force_action):
            state_rel_path = ['/sensorname',
                              '/path',
                              '/status',
                              '/state',
                              '/possible_states',
                              '/algorithm',
                              '/actions_critical_choices',
                              '/actions_major_choices',
                              '/actions_minor_choices',
                              '/unit_base',
                              '/unit_modifier',
                              '/unit_rate',
                              '/t_minor_lower',
                              '/t_minor_upper',
                              '/t_major_lower',
                              '/t_major_upper',
                              '/t_critical_lower',
                              '/t_critical_upper',
                              ]

            if any(p['path'] in state_rel_path for p in patch_obj):
                raise wsme.exc.ClientSideError(
                    _("The following fields are not remote-modifiable: %s") %
                    state_rel_path)
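A hedged sketch of a sensor suppress update against SensorController.patch() above. The '/suppress' path accepts 'True'/'False' (case-insensitive) or 'force_action', which keeps the original suppress value while forcing the hwmon notification. Endpoint URL and token are assumptions, not defined by this commit.

import json

import requests

INVENTORY_URL = 'http://127.0.0.1:6380/v1'  # hypothetical address and port
TOKEN = 'REPLACE_WITH_KEYSTONE_TOKEN'       # hypothetical auth token


def suppress_sensor(sensor_uuid, suppress=True):
    # str(True) yields 'True', one of the values patch() validates.
    patch = [{'op': 'replace', 'path': '/suppress',
              'value': str(suppress)}]
    resp = requests.patch(
        '%s/sensors/%s' % (INVENTORY_URL, sensor_uuid),
        headers={'Content-Type': 'application/json',
                 'X-Auth-Token': TOKEN},
        data=json.dumps(patch))
    resp.raise_for_status()
    return resp.json()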
751 inventory/inventory/inventory/api/controllers/v1/sensorgroup.py (Normal file)
@@ -0,0 +1,751 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2013 UnitedStack Inc.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
#
|
||||||
|
# Copyright (c) 2013-2018 Wind River Systems, Inc.
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import jsonpatch
|
||||||
|
import pecan
|
||||||
|
from pecan import rest
|
||||||
|
import six
|
||||||
|
import uuid
|
||||||
|
import wsme
|
||||||
|
from wsme import types as wtypes
|
||||||
|
import wsmeext.pecan as wsme_pecan
|
||||||
|
|
||||||
|
from inventory.api.controllers.v1 import base
|
||||||
|
from inventory.api.controllers.v1 import collection
|
||||||
|
from inventory.api.controllers.v1 import link
|
||||||
|
from inventory.api.controllers.v1 import sensor as sensor_api
|
||||||
|
from inventory.api.controllers.v1 import types
|
||||||
|
from inventory.api.controllers.v1 import utils
|
||||||
|
from inventory.common import constants
|
||||||
|
from inventory.common import exception
|
||||||
|
from inventory.common import hwmon_api
|
||||||
|
from inventory.common.i18n import _
|
||||||
|
from inventory.common import k_host
|
||||||
|
from inventory.common import utils as cutils
|
||||||
|
from inventory import objects
|
||||||
|
from oslo_log import log
|
||||||
|
from oslo_utils import uuidutils
|
||||||
|
from six import text_type as unicode
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class SensorGroupPatchType(types.JsonPatchType):
|
||||||
|
@staticmethod
|
||||||
|
def mandatory_attrs():
|
||||||
|
return ['/host_uuid', 'uuid']
|
||||||
|
|
||||||
|
|
||||||
|
class SensorGroup(base.APIBase):
|
||||||
|
"""API representation of an Sensor Group
|
||||||
|
|
||||||
|
This class enforces type checking and value constraints, and converts
|
||||||
|
between the internal object model and the API representation of an
|
||||||
|
sensorgroup.
|
||||||
|
"""
|
||||||
|
|
||||||
|
uuid = types.uuid
|
||||||
|
"Unique UUID for this sensorgroup"
|
||||||
|
|
||||||
|
sensorgroupname = wtypes.text
|
||||||
|
"Represent the name of the sensorgroup. Unique with path per host"
|
||||||
|
|
||||||
|
path = wtypes.text
|
||||||
|
"Represent the path of the sensor. Unique with sensorname per host"
|
||||||
|
|
||||||
|
sensortype = wtypes.text
|
||||||
|
"Represent the sensortype . e.g. Temperature, WatchDog"
|
||||||
|
|
||||||
|
datatype = wtypes.text
|
||||||
|
"Represent the datatype e.g. discrete or analog,"
|
||||||
|
|
||||||
|
state = wtypes.text
|
||||||
|
"Represent the state of the sensorgroup"
|
||||||
|
|
||||||
|
possible_states = wtypes.text
|
||||||
|
"Represent the possible states of the sensorgroup"
|
||||||
|
|
||||||
|
algorithm = wtypes.text
|
||||||
|
"Represent the algorithm of the sensorgroup."
|
||||||
|
|
||||||
|
audit_interval_group = int
|
||||||
|
"Represent the audit interval of the sensorgroup."
|
||||||
|
|
||||||
|
actions_critical_choices = wtypes.text
|
||||||
|
"Represent the configurable critical severity actions of the sensorgroup."
|
||||||
|
|
||||||
|
actions_major_choices = wtypes.text
|
||||||
|
"Represent the configurable major severity actions of the sensorgroup."
|
||||||
|
|
||||||
|
actions_minor_choices = wtypes.text
|
||||||
|
"Represent the configurable minor severity actions of the sensorgroup."
|
||||||
|
|
||||||
|
actions_minor_group = wtypes.text
|
||||||
|
"Represent the minor configured actions of the sensorgroup. CSV."
|
||||||
|
|
||||||
|
actions_major_group = wtypes.text
|
||||||
|
"Represent the major configured actions of the sensorgroup. CSV."
|
||||||
|
|
||||||
|
actions_critical_group = wtypes.text
|
||||||
|
"Represent the critical configured actions of the sensorgroup. CSV."
|
||||||
|
|
||||||
|
unit_base_group = wtypes.text
|
||||||
|
"Represent the unit base of the analog sensorgroup e.g. revolutions"
|
||||||
|
|
||||||
|
unit_modifier_group = wtypes.text
|
||||||
|
"Represent the unit modifier of the analog sensorgroup e.g. 10**2"
|
||||||
|
|
||||||
|
unit_rate_group = wtypes.text
|
||||||
|
"Represent the unit rate of the sensorgroup e.g. /minute"
|
||||||
|
|
||||||
|
t_minor_lower_group = wtypes.text
|
||||||
|
"Represent the minor lower threshold of the analog sensorgroup"
|
||||||
|
|
||||||
|
t_minor_upper_group = wtypes.text
|
||||||
|
"Represent the minor upper threshold of the analog sensorgroup"
|
||||||
|
|
||||||
|
t_major_lower_group = wtypes.text
|
||||||
|
"Represent the major lower threshold of the analog sensorgroup"
|
||||||
|
|
||||||
|
t_major_upper_group = wtypes.text
|
||||||
|
"Represent the major upper threshold of the analog sensorgroup"
|
||||||
|
|
||||||
|
t_critical_lower_group = wtypes.text
|
||||||
|
"Represent the critical lower threshold of the analog sensorgroup"
|
||||||
|
|
||||||
|
t_critical_upper_group = wtypes.text
|
||||||
|
"Represent the critical upper threshold of the analog sensorgroup"
|
||||||
|
|
||||||
|
capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
|
||||||
|
six.integer_types)}
|
||||||
|
"Represent meta data of the sensorgroup"
|
||||||
|
|
||||||
|
suppress = wtypes.text
|
||||||
|
"Represent supress sensor if True, otherwise not suppress sensor"
|
||||||
|
|
||||||
|
sensors = wtypes.text
|
||||||
|
"Represent the sensors of the sensorgroup"
|
||||||
|
|
||||||
|
host_id = int
|
||||||
|
"Represent the host_id the sensorgroup belongs to"
|
||||||
|
|
||||||
|
host_uuid = types.uuid
|
||||||
|
"Represent the UUID of the host the sensorgroup belongs to"
|
||||||
|
|
||||||
|
links = [link.Link]
|
||||||
|
"Represent a list containing a self link and associated sensorgroup links"
|
||||||
|
|
||||||
|
sensors = [link.Link]
|
||||||
|
"Links to the collection of sensors on this sensorgroup"
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
self.fields = objects.SensorGroup.fields.keys()
|
||||||
|
for k in self.fields:
|
||||||
|
setattr(self, k, kwargs.get(k))
|
||||||
|
|
||||||
|
# 'sensors' is not part of objects.SenorGroups.fields (it's an
|
||||||
|
# API-only attribute)
|
||||||
|
self.fields.append('sensors')
|
||||||
|
setattr(self, 'sensors', kwargs.get('sensors', None))
|
||||||
|
|
||||||
|

    @classmethod
    def convert_with_links(cls, rsensorgroup, expand=True):

        sensorgroup = SensorGroup(**rsensorgroup.as_dict())

        sensorgroup_fields_common = ['uuid', 'host_id',
                                     'host_uuid',
                                     'sensortype', 'datatype',
                                     'sensorgroupname',
                                     'path',
                                     'state',
                                     'possible_states',
                                     'audit_interval_group',
                                     'algorithm',
                                     'actions_critical_choices',
                                     'actions_major_choices',
                                     'actions_minor_choices',
                                     'actions_minor_group',
                                     'actions_major_group',
                                     'actions_critical_group',
                                     'sensors',
                                     'suppress',
                                     'capabilities',
                                     'created_at', 'updated_at', ]

        sensorgroup_fields_analog = ['unit_base_group',
                                     'unit_modifier_group',
                                     'unit_rate_group',
                                     't_minor_lower_group',
                                     't_minor_upper_group',
                                     't_major_lower_group',
                                     't_major_upper_group',
                                     't_critical_lower_group',
                                     't_critical_upper_group', ]

        if rsensorgroup.datatype == 'discrete':
            sensorgroup_fields = sensorgroup_fields_common
        elif rsensorgroup.datatype == 'analog':
            sensorgroup_fields = \
                sensorgroup_fields_common + sensorgroup_fields_analog
        else:
            LOG.error(_("Invalid datatype={}").format(rsensorgroup.datatype))

        if not expand:
            sensorgroup.unset_fields_except(sensorgroup_fields)

        if sensorgroup.host_id and not sensorgroup.host_uuid:
            host = objects.Host.get_by_uuid(pecan.request.context,
                                            sensorgroup.host_id)
            sensorgroup.host_uuid = host.uuid

        # never expose the id attribute
        sensorgroup.host_id = wtypes.Unset
        sensorgroup.id = wtypes.Unset

        sensorgroup.links = [
            link.Link.make_link('self', pecan.request.host_url,
                                'sensorgroups',
                                sensorgroup.uuid),
            link.Link.make_link('bookmark',
                                pecan.request.host_url,
                                'sensorgroups',
                                sensorgroup.uuid,
                                bookmark=True)]

        sensorgroup.sensors = [
            link.Link.make_link('self',
                                pecan.request.host_url,
                                'sensorgroups',
                                sensorgroup.uuid + "/sensors"),
            link.Link.make_link('bookmark',
                                pecan.request.host_url,
                                'sensorgroups',
                                sensorgroup.uuid + "/sensors",
                                bookmark=True)]

        return sensorgroup

class SensorGroupCollection(collection.Collection):
    """API representation of a collection of SensorGroup objects."""

    sensorgroups = [SensorGroup]
    "A list containing SensorGroup objects"

    def __init__(self, **kwargs):
        self._type = 'sensorgroups'

    @classmethod
    def convert_with_links(cls, rsensorgroups, limit, url=None,
                           expand=False, **kwargs):
        collection = SensorGroupCollection()
        collection.sensorgroups = [SensorGroup.convert_with_links(p, expand)
                                   for p in rsensorgroups]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection


LOCK_NAME = 'SensorGroupController'

class SensorGroupController(rest.RestController):
    """REST controller for SensorGroups."""

    sensors = sensor_api.SensorController(from_sensorgroup=True)
    "Expose sensors as a sub-element of sensorgroups"

    _custom_actions = {
        'detail': ['GET'],
        'relearn': ['POST'],
    }

    def __init__(self, from_hosts=False):
        self._from_hosts = from_hosts
        self._api_token = None
        self._hwmon_address = k_host.LOCALHOST_HOSTNAME
        self._hwmon_port = constants.HWMON_PORT

    def _get_sensorgroups_collection(self, uuid,
                                     marker, limit, sort_key, sort_dir,
                                     expand=False, resource_url=None):

        if self._from_hosts and not uuid:
            raise exception.InvalidParameterValue(_(
                "Host id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.SensorGroup.get_by_uuid(
                pecan.request.context,
                marker)

        if self._from_hosts:
            sensorgroups = pecan.request.dbapi.sensorgroup_get_by_host(
                uuid, limit,
                marker_obj,
                sort_key=sort_key,
                sort_dir=sort_dir)
        else:
            if uuid:
                sensorgroups = pecan.request.dbapi.sensorgroup_get_by_host(
                    uuid, limit,
                    marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)
            else:
                sensorgroups = pecan.request.dbapi.sensorgroup_get_list(
                    limit, marker_obj,
                    sort_key=sort_key,
                    sort_dir=sort_dir)

        return SensorGroupCollection.convert_with_links(sensorgroups, limit,
                                                        url=resource_url,
                                                        expand=expand,
                                                        sort_key=sort_key,
                                                        sort_dir=sort_dir)

    @wsme_pecan.wsexpose(SensorGroupCollection, types.uuid,
                         types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, uuid=None,
                marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of sensorgroups."""
        return self._get_sensorgroups_collection(uuid,
                                                 marker, limit,
                                                 sort_key, sort_dir)

    @wsme_pecan.wsexpose(SensorGroupCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of sensorgroups with detail."""

        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "sensorgroups":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['sensorgroups', 'detail'])
        return self._get_sensorgroups_collection(uuid, marker, limit,
                                                 sort_key, sort_dir,
                                                 expand, resource_url)

    @wsme_pecan.wsexpose(SensorGroup, types.uuid)
    def get_one(self, sensorgroup_uuid):
        """Retrieve information about the given sensorgroup."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        rsensorgroup = objects.SensorGroup.get_by_uuid(
            pecan.request.context, sensorgroup_uuid)

        if rsensorgroup.datatype == 'discrete':
            rsensorgroup = objects.SensorGroupDiscrete.get_by_uuid(
                pecan.request.context, sensorgroup_uuid)
        elif rsensorgroup.datatype == 'analog':
            rsensorgroup = objects.SensorGroupAnalog.get_by_uuid(
                pecan.request.context, sensorgroup_uuid)
        else:
            LOG.error(_("Invalid datatype={}").format(rsensorgroup.datatype))

        return SensorGroup.convert_with_links(rsensorgroup)

    @staticmethod
    def _new_sensorgroup_semantic_checks(sensorgroup):
        datatype = sensorgroup.as_dict().get('datatype') or ""
        sensortype = sensorgroup.as_dict().get('sensortype') or ""
        if not (datatype and sensortype):
            raise wsme.exc.ClientSideError(_("sensorgroup-add: Cannot "
                                             "add a sensorgroup "
                                             "without a valid datatype "
                                             "and sensortype."))

        if datatype not in constants.SENSOR_DATATYPE_VALID_LIST:
            raise wsme.exc.ClientSideError(
                _("sensorgroup datatype must be one of %s.") %
                constants.SENSOR_DATATYPE_VALID_LIST)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(SensorGroup, body=SensorGroup)
    def post(self, sensorgroup):
        """Create a new sensorgroup."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        self._new_sensorgroup_semantic_checks(sensorgroup)
        try:
            sensorgroup_dict = sensorgroup.as_dict()
            new_sensorgroup = _create(sensorgroup_dict)
        except exception.InventoryException as e:
            LOG.exception(e)
            raise wsme.exc.ClientSideError(_("Invalid data"))
        return sensorgroup.convert_with_links(new_sensorgroup)

    def _get_host_uuid(self, body):
        host_uuid = body.get('host_uuid') or ""
        try:
            host = pecan.request.dbapi.host_get(host_uuid)
        except exception.NotFound:
            raise wsme.exc.ClientSideError("_get_host_uuid lookup failed")
        return host.uuid

    @wsme_pecan.wsexpose('json', body=unicode)
    def relearn(self, body):
        """Handle Sensor Model Relearn Request."""
        host_uuid = self._get_host_uuid(body)
        # LOG.info("Host UUID: %s - BM_TYPE: %s" % (host_uuid, bm_type ))

        # hwmon_sensorgroup = {'ihost_uuid': host_uuid}
        request_body = {'host_uuid': host_uuid}
        hwmon_response = hwmon_api.sensorgroup_relearn(
            self._api_token, self._hwmon_address, self._hwmon_port,
            request_body,
            constants.HWMON_DEFAULT_TIMEOUT_IN_SECS)

        if not hwmon_response:
            hwmon_response = {'status': 'fail',
                              'reason': 'no response',
                              'action': 'retry'}

        elif hwmon_response['status'] != 'pass':
            msg = _("HWMON has returned with "
                    "a status of {}, reason: {}, "
                    "recommended action: {}").format(
                hwmon_response.get('status'),
                hwmon_response.get('reason'),
                hwmon_response.get('action'))

            raise wsme.exc.ClientSideError(msg)
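
    # Illustrative sketch (hedged, not part of the service logic): the
    # relearn action is a POST whose JSON body carries only the host UUID;
    # the UUID below is a made-up placeholder.
    #
    #   POST /v1/sensorgroups/relearn
    #   {"host_uuid": "<host-uuid>"}
    #
    # hwmon then drops its stored sensor model for that host and rebuilds
    # it on the next audit, as the call above requests.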

    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [SensorGroupPatchType])
    @wsme_pecan.wsexpose(SensorGroup, types.uuid,
                         body=[SensorGroupPatchType])
    def patch(self, sensorgroup_uuid, patch):
        """Update an existing sensorgroup."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        rsensorgroup = objects.SensorGroup.get_by_uuid(
            pecan.request.context, sensorgroup_uuid)

        if rsensorgroup.datatype == 'discrete':
            rsensorgroup = objects.SensorGroupDiscrete.get_by_uuid(
                pecan.request.context, sensorgroup_uuid)
        elif rsensorgroup.datatype == 'analog':
            rsensorgroup = objects.SensorGroupAnalog.get_by_uuid(
                pecan.request.context, sensorgroup_uuid)
        else:
            raise wsme.exc.ClientSideError(_("Invalid datatype={}").format(
                rsensorgroup.datatype))

        rsensorgroup_orig = copy.deepcopy(rsensorgroup)

        host = pecan.request.dbapi.host_get(
            rsensorgroup['host_id']).as_dict()

        utils.validate_patch(patch)
        patch_obj = jsonpatch.JsonPatch(patch)
        my_host_uuid = None
        for p in patch_obj:
            # For Profile replace host_uuid with corresponding id
            if p['path'] == '/host_uuid':
                p['path'] = '/host_id'
                host = objects.Host.get_by_uuid(pecan.request.context,
                                                p['value'])
                p['value'] = host.id
                my_host_uuid = host.uuid

        # update sensors if set
        sensors = None
        for s in patch:
            if '/sensors' in s['path']:
                sensors = s['value']
                patch.remove(s)
                break

        if sensors:
            _update_sensors("modify", rsensorgroup, host, sensors)

        try:
            sensorgroup = SensorGroup(**jsonpatch.apply_patch(
                rsensorgroup.as_dict(),
                patch_obj))
        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        if rsensorgroup.datatype == 'discrete':
            fields = objects.SensorGroupDiscrete.fields
        else:
            fields = objects.SensorGroupAnalog.fields

        for field in fields:
            if rsensorgroup[field] != getattr(sensorgroup, field):
                rsensorgroup[field] = getattr(sensorgroup, field)

        delta = rsensorgroup.obj_what_changed()

        sensorgroup_suppress_attrs = ['suppress']
        force_action = False
        if any(x in delta for x in sensorgroup_suppress_attrs):
            valid_suppress = ['True', 'False', 'true', 'false', 'force_action']
            if rsensorgroup.suppress.lower() not in valid_suppress:
                raise wsme.exc.ClientSideError(_("Invalid suppress value, "
                                                 "select 'True' or 'False'"))
            elif rsensorgroup.suppress.lower() == 'force_action':
                LOG.info("suppress=%s" % rsensorgroup.suppress.lower())
                rsensorgroup.suppress = rsensorgroup_orig.suppress
                force_action = True

        self._semantic_modifiable_fields(patch_obj, force_action)

        if not pecan.request.user_agent.startswith('hwmon'):
            hwmon_sensorgroup = cutils.removekeys_nonhwmon(
                rsensorgroup.as_dict())

            if not my_host_uuid:
                host = objects.Host.get_by_uuid(pecan.request.context,
                                                rsensorgroup.host_id)
                my_host_uuid = host.uuid

            hwmon_sensorgroup.update({'host_uuid': my_host_uuid})

            hwmon_response = hwmon_api.sensorgroup_modify(
                self._api_token, self._hwmon_address, self._hwmon_port,
                hwmon_sensorgroup,
                constants.HWMON_DEFAULT_TIMEOUT_IN_SECS)

            if not hwmon_response:
                hwmon_response = {'status': 'fail',
                                  'reason': 'no response',
                                  'action': 'retry'}

            if hwmon_response['status'] != 'pass':
                msg = _("HWMON has returned with a status of {}, reason: {}, "
                        "recommended action: {}").format(
                    hwmon_response.get('status'),
                    hwmon_response.get('reason'),
                    hwmon_response.get('action'))

                if force_action:
                    LOG.error(msg)
                else:
                    raise wsme.exc.ClientSideError(msg)

        sensorgroup_prop_attrs = ['audit_interval_group',
                                  'actions_minor_group',
                                  'actions_major_group',
                                  'actions_critical_group',
                                  'suppress']

        if any(x in delta for x in sensorgroup_prop_attrs):
            # propagate to Sensors within this SensorGroup
            sensor_val = {'audit_interval': rsensorgroup.audit_interval_group,
                          'actions_minor': rsensorgroup.actions_minor_group,
                          'actions_major': rsensorgroup.actions_major_group,
                          'actions_critical':
                              rsensorgroup.actions_critical_group}
            if 'suppress' in delta:
                sensor_val.update({'suppress': rsensorgroup.suppress})
            pecan.request.dbapi.sensorgroup_propagate(
                rsensorgroup.uuid, sensor_val)

        rsensorgroup.save()

        return SensorGroup.convert_with_links(rsensorgroup)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, sensorgroup_uuid):
        """Delete a sensorgroup."""
        if self._from_hosts:
            raise exception.OperationNotPermitted

        pecan.request.dbapi.sensorgroup_destroy(sensorgroup_uuid)

    @staticmethod
    def _semantic_modifiable_fields(patch_obj, force_action=False):
        # Prevent auto populated fields from being updated
        state_rel_path = ['/uuid', '/id', '/host_id', '/datatype',
                          '/sensortype']

        if any(p['path'] in state_rel_path for p in patch_obj):
            raise wsme.exc.ClientSideError(_("The following fields can not be "
                                             "modified: %s ") % state_rel_path)

        if not (pecan.request.user_agent.startswith('hwmon') or force_action):
            state_rel_path = ['/sensorgroupname', '/path',
                              '/state', '/possible_states',
                              '/actions_critical_choices',
                              '/actions_major_choices',
                              '/actions_minor_choices',
                              '/unit_base_group',
                              '/unit_modifier_group',
                              '/unit_rate_group',
                              '/t_minor_lower_group',
                              '/t_minor_upper_group',
                              '/t_major_lower_group',
                              '/t_major_upper_group',
                              '/t_critical_lower_group',
                              '/t_critical_upper_group',
                              ]

            if any(p['path'] in state_rel_path for p in patch_obj):
                raise wsme.exc.ClientSideError(
                    _("The following fields are not remote-modifiable: %s") %
                    state_rel_path)

def _create(sensorgroup, from_profile=False):
    """Create a sensorgroup through a non-HTTP request e.g. via profile.py
    while still passing through sensorgroup semantic checks.
    Hence, not declared inside a class.
    Param:
        sensorgroup - dictionary of sensorgroup values
        from_profile - Boolean whether from profile
    """

    if 'host_id' in sensorgroup and sensorgroup['host_id']:
        ihostid = sensorgroup['host_id']
    else:
        ihostid = sensorgroup['host_uuid']

    ihost = pecan.request.dbapi.host_get(ihostid)
    if uuidutils.is_uuid_like(ihostid):
        host_id = ihost['id']
    else:
        host_id = ihostid
    sensorgroup.update({'host_id': host_id})
    LOG.info("sensorgroup post sensorgroups ihostid: %s" % host_id)
    sensorgroup['host_uuid'] = ihost['uuid']

    # Assign UUID if not already done.
    if not sensorgroup.get('uuid'):
        sensorgroup['uuid'] = str(uuid.uuid4())

    # Get sensors
    sensors = None
    if 'sensors' in sensorgroup:
        sensors = sensorgroup['sensors']

    # Set defaults - before checks to allow for optional attributes
    # if not from_profile:
    #     sensorgroup = _set_defaults(sensorgroup)

    # Semantic checks
    # sensorgroup = _check("add",
    #                      sensorgroup,
    #                      sensors=sensors,
    #                      ifaces=uses_if,
    #                      from_profile=from_profile)

    if sensorgroup.get('datatype'):
        if sensorgroup['datatype'] == 'discrete':
            new_sensorgroup = pecan.request.dbapi.sensorgroup_discrete_create(
                ihost.id, sensorgroup)
        elif sensorgroup['datatype'] == 'analog':
            new_sensorgroup = pecan.request.dbapi.sensorgroup_analog_create(
                ihost.id, sensorgroup)
        else:
            raise wsme.exc.ClientSideError(_("Invalid datatype. %s") %
                                           sensorgroup['datatype'])
    else:
        raise wsme.exc.ClientSideError(_("Unspecified datatype."))

    # Update sensors
    if sensors:
        try:
            _update_sensors("modify",
                            new_sensorgroup.as_dict(),
                            ihost,
                            sensors)
        except Exception as e:
            pecan.request.dbapi.sensorgroup_destroy(
                new_sensorgroup.as_dict()['uuid'])
            raise e

    # return new_sensorgroup
    return SensorGroup.convert_with_links(new_sensorgroup)

def _update_sensors(op, sensorgroup, ihost, sensors):
    sensors = sensors.split(',')

    this_sensorgroup_datatype = None
    this_sensorgroup_sensortype = None
    if op == "add":
        this_sensorgroup_id = 0
    else:
        this_sensorgroup_id = sensorgroup['id']
        this_sensorgroup_datatype = sensorgroup['datatype']
        this_sensorgroup_sensortype = sensorgroup['sensortype']

    if sensors:
        # Update Sensors' sensorgroup_uuid attribute
        sensors_list = pecan.request.dbapi.sensor_get_all(
            host_id=ihost['id'])
        for p in sensors_list:
            # if new sensor associated
            if (p.uuid in sensors or p.sensorname in sensors) \
                    and not p.sensorgroup_id:
                values = {'sensorgroup_id': sensorgroup['id']}
            # else if old sensor disassociated
            elif ((p.uuid not in sensors and p.sensorname not in sensors) and
                    p.sensorgroup_id and
                    p.sensorgroup_id == this_sensorgroup_id):
                values = {'sensorgroup_id': None}
            else:
                continue

            if p.datatype != this_sensorgroup_datatype:
                msg = _("Invalid datatype: host {} sensor {}: Expected: {} "
                        "Received: {}.").format(
                    ihost['hostname'], p.sensorname,
                    this_sensorgroup_datatype, p.datatype)
                raise wsme.exc.ClientSideError(msg)

            if p.sensortype != this_sensorgroup_sensortype:
                msg = _("Invalid sensortype: host {} sensor {}: Expected: {} "
                        "Received: {}.").format(
                    ihost['hostname'], p.sensorname,
                    this_sensorgroup_sensortype, p.sensortype)
                raise wsme.exc.ClientSideError(msg)

            try:
                pecan.request.dbapi.sensor_update(p.uuid, values)
            except exception.HTTPNotFound:
                msg = _("Sensor update of sensorgroup_uuid failed: host {} "
                        "sensor {}").format(ihost['hostname'], p.sensorname)
                raise wsme.exc.ClientSideError(msg)
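
The patch flow above takes a standard JSON patch document: suppress changes are validated (with 'force_action' allowed to bypass a failed hwmon round-trip), then propagated to the member sensors. A minimal client-side sketch of a suppress update, assuming the route mounts at /v1/sensorgroups as the controller name suggests; the endpoint host, UUID and token are placeholders, not defined by this commit:

    # Hypothetical py27 client sketch: suppress a sensorgroup via JSON patch
    import json
    import urllib2

    patch = [{"op": "replace", "path": "/suppress", "value": "True"}]
    req = urllib2.Request(
        "http://<inventory-endpoint>/v1/sensorgroups/<sensorgroup-uuid>",
        data=json.dumps(patch),
        headers={"Content-Type": "application/json",
                 "X-Auth-Token": "<token>"})
    req.get_method = lambda: "PATCH"  # urllib2 has no native PATCH verb
    urllib2.urlopen(req)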
39
inventory/inventory/inventory/api/controllers/v1/state.py
Normal file
@ -0,0 +1,39 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import link
from wsme import types as wtypes


class State(base.APIBase):

    current = wtypes.text
    "The current state"

    target = wtypes.text
    "The user modified desired state"

    available = [wtypes.text]
    "A list of available states it is able to transition to"

    links = [link.Link]
    "A list containing a self link and associated state links"
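
State is a small embeddable document for resources that expose a current/target state pair. A sketch of how a serialized instance might render once WSME converts it to JSON (field values below are illustrative only):

    {
        "current": "enabled",
        "target": "disabled",
        "available": ["enabled", "disabled"],
        "links": [{"href": "...", "rel": "self"}]
    }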
49
inventory/inventory/inventory/api/controllers/v1/sysinv.py
Normal file
@ -0,0 +1,49 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


from cgtsclient.v1 import client as cgts_client
from inventory.api import config
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log


CONF = cfg.CONF

LOG = log.getLogger(__name__)

_SESSION = None


def cgtsclient(context, version=1, endpoint=None):
    """Constructs a cgts client object for making API requests.

    :param context: The request context for auth.
    :param version: API endpoint version.
    :param endpoint: Optional. If not provided, the endpoint is
                     retrieved from the session.
    """
    global _SESSION

    if not _SESSION:
        _SESSION = ks_loading.load_session_from_conf_options(
            CONF, config.sysinv_group.name)

    auth_token = context.auth_token
    if endpoint is None:
        auth = context.get_auth_plugin()
        service_type, service_name, interface = \
            CONF.sysinv.catalog_info.split(':')
        service_parameters = {'service_type': service_type,
                              'service_name': service_name,
                              'interface': interface,
                              'region_name': CONF.sysinv.os_region_name}
        endpoint = _SESSION.get_endpoint(auth, **service_parameters)

    return cgts_client.Client(version=version,
                              endpoint=endpoint,
                              token=auth_token)
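
The endpoint lookup above derives its keystone catalog filter from a single colon-separated option, split into service_type:service_name:interface. A sketch of the corresponding configuration, assuming conventional StarlingX service naming (the exact values are illustrative, not mandated by this commit):

    [sysinv]
    catalog_info = platform:sysinv:internalURL
    os_region_name = RegionOne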
266
inventory/inventory/inventory/api/controllers/v1/system.py
Normal file
@ -0,0 +1,266 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


from oslo_log import log
import pecan
from pecan import rest
import six
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import host
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils as api_utils
from inventory.common import constants
from inventory.common import exception
from inventory.common import k_host
from inventory import objects

LOG = log.getLogger(__name__)

VALID_VSWITCH_TYPES = [constants.VSWITCH_TYPE_OVS_DPDK]


class System(base.APIBase):
    """API representation of a system.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of
    a system.
    """

    uuid = types.uuid
    "The UUID of the system"

    name = wtypes.text
    "The name of the system"

    system_type = wtypes.text
    "The type of the system"

    system_mode = wtypes.text
    "The mode of the system"

    description = wtypes.text
    "The description of the system"

    contact = wtypes.text
    "The contact of the system"

    location = wtypes.text
    "The location of the system"

    services = int
    "The services of the system"

    software_version = wtypes.text
    "The software version of the system"

    timezone = wtypes.text
    "The timezone of the system"

    links = [link.Link]
    "A list containing a self link and associated system links"

    hosts = [link.Link]
    "Links to the collection of hosts contained in this system"

    capabilities = {wtypes.text: api_utils.ValidTypes(wtypes.text, bool,
                                                      six.integer_types)}
    "System defined capabilities"

    region_name = wtypes.text
    "The region name of the system"

    distributed_cloud_role = wtypes.text
    "The distributed cloud role of the system"

    service_project_name = wtypes.text
    "The service project name of the system"

    security_feature = wtypes.text
    "Kernel arguments associated with enabled spectre/meltdown fix features"

    def __init__(self, **kwargs):
        self.fields = objects.System.fields.keys()

        for k in self.fields:
            # Translate any special internal representation of data to its
            # customer facing form
            if k == 'security_feature':
                # look up which customer-facing-security-feature-string goes
                # with the kernel arguments tracked in sysinv
                kernel_args = kwargs.get(k)
                translated_string = kernel_args

                for user_string, args_string in \
                        constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS.iteritems():  # noqa
                    if args_string == kernel_args:
                        translated_string = user_string
                        break
                setattr(self, k, translated_string)
            else:
                # No translation required
                setattr(self, k, kwargs.get(k))

    @classmethod
    def convert_with_links(cls, rpc_system, expand=True):
        minimum_fields = ['id', 'uuid', 'name', 'system_type', 'system_mode',
                          'description', 'capabilities',
                          'contact', 'location', 'software_version',
                          'created_at', 'updated_at', 'timezone',
                          'region_name', 'service_project_name',
                          'distributed_cloud_role', 'security_feature']

        fields = minimum_fields if not expand else None

        iSystem = System.from_rpc_object(rpc_system, fields)

        iSystem.links = [link.Link.make_link('self', pecan.request.host_url,
                                             'systems', iSystem.uuid),
                         link.Link.make_link('bookmark',
                                             pecan.request.host_url,
                                             'systems', iSystem.uuid,
                                             bookmark=True)
                         ]

        if expand:
            iSystem.hosts = [
                link.Link.make_link('self',
                                    pecan.request.host_url,
                                    'systems',
                                    iSystem.uuid + "/hosts"),
                link.Link.make_link('bookmark',
                                    pecan.request.host_url,
                                    'systems',
                                    iSystem.uuid + "/hosts",
                                    bookmark=True)]

        return iSystem


class SystemCollection(collection.Collection):
    """API representation of a collection of systems."""

    systems = [System]
    "A list containing system objects"

    def __init__(self, **kwargs):
        self._type = 'systems'

    @classmethod
    def convert_with_links(cls, systems, limit, url=None,
                           expand=False, **kwargs):
        collection = SystemCollection()
        collection.systems = [System.convert_with_links(ch, expand)
                              for ch in systems]

        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection


LOCK_NAME = 'SystemController'


class SystemController(rest.RestController):
    """REST controller for system."""

    hosts = host.HostController(from_system=True)
    "Expose hosts as a sub-element of system"

    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self):
        self._bm_region = None

    def _bm_region_get(self):
        # only supported region type is BM_EXTERNAL
        if not self._bm_region:
            self._bm_region = k_host.BM_EXTERNAL
        return self._bm_region

    def _get_system_collection(self, marker, limit, sort_key, sort_dir,
                               expand=False, resource_url=None):
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            marker_obj = objects.System.get_by_uuid(pecan.request.context,
                                                    marker)
        system = pecan.request.dbapi.system_get_list(limit, marker_obj,
                                                     sort_key=sort_key,
                                                     sort_dir=sort_dir)
        for i in system:
            i.capabilities['bm_region'] = self._bm_region_get()

        return SystemCollection.convert_with_links(system, limit,
                                                   url=resource_url,
                                                   expand=expand,
                                                   sort_key=sort_key,
                                                   sort_dir=sort_dir)

    @wsme_pecan.wsexpose(SystemCollection, types.uuid,
                         int, wtypes.text, wtypes.text)
    def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of systems.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        return self._get_system_collection(marker, limit, sort_key, sort_dir)

    @wsme_pecan.wsexpose(SystemCollection, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of systems with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        # /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "system":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['system', 'detail'])
        return self._get_system_collection(marker, limit, sort_key, sort_dir,
                                           expand, resource_url)

    @wsme_pecan.wsexpose(System, types.uuid)
    def get_one(self, system_uuid):
        """Retrieve information about the given system.

        :param system_uuid: UUID of a system.
        """
        rpc_system = objects.System.get_by_uuid(pecan.request.context,
                                                system_uuid)

        rpc_system.capabilities['bm_region'] = self._bm_region_get()
        return System.convert_with_links(rpc_system)

    @wsme_pecan.wsexpose(System, body=System)
    def post(self, system):
        """Create a new system."""
        raise exception.OperationNotPermitted

    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, system_uuid):
        """Delete a system.

        :param system_uuid: UUID of a system.
        """
        raise exception.OperationNotPermitted
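
The collection endpoints page through results with marker/limit, and collection.get_next supplies the link to the following page. A minimal client-side sketch of walking the systems list, assuming a reachable inventory endpoint and token (both placeholders):

    # Hypothetical py27 pagination walk over GET /v1/systems
    import json
    import urllib2

    url = "http://<inventory-endpoint>/v1/systems?limit=2"
    while url:
        req = urllib2.Request(url, headers={"X-Auth-Token": "<token>"})
        page = json.loads(urllib2.urlopen(req).read())
        for system in page["systems"]:
            print(system["name"], system["system_mode"])
        url = page.get("next")  # absent on the last page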
215
inventory/inventory/inventory/api/controllers/v1/types.py
Normal file
@ -0,0 +1,215 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding: utf-8
#
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from oslo_utils import strutils
import six

import wsme
from wsme import types as wtypes

from inventory.api.controllers.v1 import utils as apiutils
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import utils


class MACAddressType(wtypes.UserType):
    """A simple MAC address type."""

    basetype = wtypes.text
    name = 'macaddress'

    @staticmethod
    def validate(value):
        return utils.validate_and_normalize_mac(value)

    @staticmethod
    def frombasetype(value):
        return MACAddressType.validate(value)


class UUIDType(wtypes.UserType):
    """A simple UUID type."""

    basetype = wtypes.text
    name = 'uuid'
    # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
    # to get the name of the type by accessing its __name__ attribute.
    # Remove this __name__ attribute once it's fixed in WSME.
    # https://bugs.launchpad.net/wsme/+bug/1265590
    __name__ = name

    @staticmethod
    def validate(value):
        if not utils.is_uuid_like(value):
            raise exception.InvalidUUID(uuid=value)
        return value

    @staticmethod
    def frombasetype(value):
        if value is None:
            return None
        return UUIDType.validate(value)


class BooleanType(wtypes.UserType):
    """A simple boolean type."""

    basetype = wtypes.text
    name = 'boolean'

    @staticmethod
    def validate(value):
        try:
            return strutils.bool_from_string(value, strict=True)
        except ValueError as e:
            # raise Invalid to return 400 (BadRequest) in the API
            raise exception.Invalid(six.text_type(e))

    @staticmethod
    def frombasetype(value):
        if value is None:
            return None
        return BooleanType.validate(value)


class IPAddressType(wtypes.UserType):
    """A generic IP address type that supports both IPv4 and IPv6."""

    basetype = wtypes.text
    name = 'ipaddress'

    @staticmethod
    def validate(value):
        if not utils.is_valid_ip(value):
            raise exception.InvalidIPAddress(address=value)
        return value

    @staticmethod
    def frombasetype(value):
        if value is None:
            return None
        return IPAddressType.validate(value)


macaddress = MACAddressType()
uuid = UUIDType()
boolean = BooleanType()
ipaddress = IPAddressType()


class ApiDictType(wtypes.UserType):
    name = 'apidict'
    __name__ = name

    basetype = {wtypes.text:
                apiutils.ValidTypes(wtypes.text, six.integer_types)}


apidict = ApiDictType()


class JsonPatchType(wtypes.Base):
    """A complex type that represents a single json-patch operation."""

    path = wtypes.wsattr(wtypes.StringType(pattern='^(/[\w-]+)+$'),
                         mandatory=True)
    op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'),
                       mandatory=True)
    value = apiutils.ValidTypes(wtypes.text, six.integer_types, float)

    @staticmethod
    def internal_attrs():
        """Returns a list of internal attributes.

        Internal attributes can't be added, replaced or removed. This
        method may be overwritten by derived class.

        """
        return ['/created_at', '/id', '/links', '/updated_at', '/uuid']

    @staticmethod
    def mandatory_attrs():
        """Returns a list of mandatory attributes.

        Mandatory attributes can't be removed from the document. This
        method should be overwritten by derived class.

        """
        return []

    @staticmethod
    def validate(patch):
        if patch.path in patch.internal_attrs():
            msg = _("'%s' is an internal attribute and can not be updated")
            raise wsme.exc.ClientSideError(msg % patch.path)

        if patch.path in patch.mandatory_attrs() and patch.op == 'remove':
            msg = _("'%s' is a mandatory attribute and can not be removed")
            raise wsme.exc.ClientSideError(msg % patch.path)

        if patch.op == 'add':
            if patch.path.count('/') == 1:
                msg = _('Adding a new attribute (%s) to the root of '
                        'the resource is not allowed')
                raise wsme.exc.ClientSideError(msg % patch.path)

        if patch.op != 'remove':
            if not patch.value:
                msg = _("Edit and Add operation of the field requires "
                        "non-empty value.")
                raise wsme.exc.ClientSideError(msg)

        ret = {'path': patch.path, 'op': patch.op}
        if patch.value:
            ret['value'] = patch.value
        return ret


class MultiType(wtypes.UserType):
    """A complex type that represents one or more types.

    Used for validating that a value is an instance of one of the types.

    :param *types: Variable-length list of types.

    """
    def __init__(self, types):
        self.types = types

    def validate(self, value):
        for t in self.types:
            if t is wsme.types.text and isinstance(value, wsme.types.bytes):
                value = value.decode()
            if isinstance(t, list):
                if isinstance(value, list):
                    for v in value:
                        if not isinstance(v, t[0]):
                            break
                    else:
                        return value
            elif isinstance(value, t):
                return value
        else:
            raise ValueError(
                _("Wrong type. Expected '%(type)s', got '%(value)s'")
                % {'type': self.types, 'value': type(value)})
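
These user types plug directly into WSME attribute declarations, so validation runs when a request document is coerced. A small sketch of how a controller might declare and exercise them (the Port resource here is hypothetical, purely illustrative):

    # Hypothetical resource using the custom API types
    from inventory.api.controllers.v1 import types
    from wsme import types as wtypes

    class Port(wtypes.Base):
        uuid = types.uuid            # rejects non-UUID strings
        mac = types.macaddress       # normalizes and validates MACs
        enabled = types.boolean      # accepts "true"/"False"/"1", etc.

    # e.g. types.uuid.validate("not-a-uuid") raises InvalidUUID -> HTTP 400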
567
inventory/inventory/inventory/api/controllers/v1/utils.py
Executable file
@ -0,0 +1,567 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


import contextlib
import jsonpatch
import netaddr
import os
import pecan
import re
import socket
import sys
import traceback
import tsconfig.tsconfig as tsc
import wsme

from inventory.api.controllers.v1.sysinv import cgtsclient
from inventory.common import constants
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import k_host
from inventory.common.utils import memoized
from inventory import objects
from oslo_config import cfg
from oslo_log import log

CONF = cfg.CONF
LOG = log.getLogger(__name__)
KEY_VALUE_SEP = '='
JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException,
                        jsonpatch.JsonPointerException,
                        KeyError)


def ip_version_to_string(ip_version):
    return str(constants.IP_FAMILIES[ip_version])


def validate_limit(limit):
    if limit and limit < 0:
        raise wsme.exc.ClientSideError(_("Limit must be positive"))

    return min(CONF.api.limit_max, limit) or CONF.api.limit_max


def validate_sort_dir(sort_dir):
    if sort_dir not in ['asc', 'desc']:
        raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. "
                                         "Acceptable values are "
                                         "'asc' or 'desc'") % sort_dir)
    return sort_dir


def validate_patch(patch):
    """Performs a basic validation on patch."""

    if not isinstance(patch, list):
        patch = [patch]

    for p in patch:
        path_pattern = re.compile("^/[a-zA-Z0-9-_]+(/[a-zA-Z0-9-_]+)*$")

        if not isinstance(p, dict) or \
                any(key for key in ["path", "op"] if key not in p):
            raise wsme.exc.ClientSideError(
                _("Invalid patch format: %s") % str(p))

        path = p["path"]
        op = p["op"]

        if op not in ["add", "replace", "remove"]:
            raise wsme.exc.ClientSideError(
                _("Operation not supported: %s") % op)

        if not path_pattern.match(path):
            raise wsme.exc.ClientSideError(_("Invalid path: %s") % path)

        if op == "add":
            if path.count('/') == 1:
                raise wsme.exc.ClientSideError(
                    _("Adding an additional attribute (%s) to the "
                      "resource is not allowed") % path)

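# Illustrative sketch of documents accepted/rejected by validate_patch
# (example data only, not part of the validation logic itself):
#
#   validate_patch([{"op": "replace", "path": "/location", "value": "row3"}])
#       -> passes: supported op, well-formed path
#   validate_patch([{"op": "add", "path": "/newattr", "value": 1}])
#       -> ClientSideError: adding a root-level attribute is rejected
#   validate_patch([{"op": "move", "path": "/a/b"}])
#       -> ClientSideError: only add/replace/remove are supported
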
def validate_mtu(mtu):
    """Check if MTU is valid"""
    if mtu < 576 or mtu > 9216:
        raise wsme.exc.ClientSideError(_(
            "MTU must be between 576 and 9216 bytes."))


def validate_address_within_address_pool(ip, pool):
    """Determine whether an IP address is within the specified IP address pool.

    :param ip netaddr.IPAddress object
    :param pool objects.AddressPool object
    """
    ipset = netaddr.IPSet()
    for start, end in pool.ranges:
        ipset.update(netaddr.IPRange(start, end))

    if netaddr.IPAddress(ip) not in ipset:
        raise wsme.exc.ClientSideError(_(
            "IP address %s is not within address pool ranges") % str(ip))


def validate_address_within_network(ip, network):
    """Determine whether an IP address is within the specified IP network.

    :param ip netaddr.IPAddress object
    :param network objects.Network object
    """
    LOG.info("TODO(sc) validate_address_within_network "
             "ip=%s, network=%s" % (ip, network))


class ValidTypes(wsme.types.UserType):
    """User type for validate that value has one of a few types."""

    def __init__(self, *types):
        self.types = types

    def validate(self, value):
        for t in self.types:
            if t is wsme.types.text and isinstance(value, wsme.types.bytes):
                value = value.decode()
            if isinstance(value, t):
                return value
        else:
            raise ValueError("Wrong type. Expected '%s', got '%s'" % (
                self.types, type(value)))


def is_valid_hostname(hostname):
    """Determine whether an address is valid as per RFC 1123.
    """

    # Maximum length of 255
    rc = True
    length = len(hostname)
    if length > 255:
        raise wsme.exc.ClientSideError(_(
            "Hostname {} is too long. Length {} is greater than 255. "
            "Please configure valid hostname.").format(hostname, length))

    # Allow a single dot on the right hand side
    if hostname[-1] == ".":
        hostname = hostname[:-1]
    # Create a regex to ensure:
    # - hostname does not begin or end with a dash
    # - each segment is 1 to 63 characters long
    # - valid characters are A-Z (any case) and 0-9
    valid_re = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    rc = all(valid_re.match(x) for x in hostname.split("."))
    if not rc:
        raise wsme.exc.ClientSideError(_(
            "Hostname %s is invalid. Hostname may not begin or end with"
            " a dash. Each segment is 1 to 63 chars long and valid"
            " characters are A-Z, a-z, and 0-9."
            " Please configure valid hostname.") % (hostname))

    return rc


def is_host_active_controller(host):
    """Returns True if the supplied host is the active controller."""
    if host['personality'] == k_host.CONTROLLER:
        return host['hostname'] == socket.gethostname()
    return False


def is_host_simplex_controller(host):
    return host['personality'] == k_host.CONTROLLER and \
        os.path.isfile(tsc.PLATFORM_SIMPLEX_FLAG)


def is_aio_simplex_host_unlocked(host):
    return (get_system_mode() == constants.SYSTEM_MODE_SIMPLEX and
            host['administrative'] != k_host.ADMIN_LOCKED and
            host['invprovision'] != k_host.PROVISIONING)


def get_vswitch_type():
    system = pecan.request.dbapi.system_get_one()
    return system.capabilities.get('vswitch_type')


def get_https_enabled():
    system = pecan.request.dbapi.system_get_one()
    return system.capabilities.get('https_enabled', False)


def get_tpm_config():
    tpmconfig = None
    try:
        tpmconfig = pecan.request.dbapi.tpmconfig_get_one()
    except exception.InventoryException:
        pass
    return tpmconfig


def get_sdn_enabled():
    system = pecan.request.dbapi.system_get_one()
    return system.capabilities.get('sdn_enabled', False)


def get_region_config():
    system = pecan.request.dbapi.system_get_one()
    # TODO(mpeters): this should to be updated to return a boolean value
    # requires integration changes between horizon, cgts-client and users to
    # transition to a proper boolean value
    return system.capabilities.get('region_config', False)


def get_shared_services():
    system = pecan.request.dbapi.system_get_one()
    return system.capabilities.get('shared_services', None)


class SystemHelper(object):
    @staticmethod
    def get_product_build():
        active_controller = HostHelper.get_active_controller()
        if k_host.COMPUTE in active_controller.subfunctions:
            return constants.TIS_AIO_BUILD
        return constants.TIS_STD_BUILD


class HostHelper(object):
    @staticmethod
    @memoized
    def get_active_controller(dbapi=None):
        """Returns host object for active controller."""
        if not dbapi:
            dbapi = pecan.request.dbapi
        hosts = objects.Host.list(pecan.request.context,
                                  filters={'personality': k_host.CONTROLLER})
        active_controller = None
        for host in hosts:
            if is_host_active_controller(host):
                active_controller = host
                break

        return active_controller


def get_system_mode(dbapi=None):
    if not dbapi:
        dbapi = pecan.request.dbapi
    system = dbapi.system_get_one()
    return system.system_mode


def get_distributed_cloud_role(dbapi=None):
    if not dbapi:
        dbapi = pecan.request.dbapi
    system = dbapi.system_get_one()
    return system.distributed_cloud_role


def is_kubernetes_config(dbapi=None):
    if not dbapi:
        dbapi = pecan.request.dbapi
    system = dbapi.system_get_one()
    return system.capabilities.get('kubernetes_enabled', False)


def is_aio_duplex_system():
    return get_system_mode() == constants.SYSTEM_MODE_DUPLEX and \
        SystemHelper.get_product_build() == constants.TIS_AIO_BUILD


def get_compute_count(dbapi=None):
    if not dbapi:
        dbapi = pecan.request.dbapi
    return len(dbapi.host_get_by_personality(k_host.COMPUTE))


class SBApiHelper(object):
    """API Helper Class for manipulating Storage Backends.

    Common functionality needed by the storage_backend API and its derived
    APIs: storage_ceph, storage_lvm, storage_file.
    """
    @staticmethod
    def validate_backend(storage_backend_dict):

        backend = storage_backend_dict.get('backend')
        if not backend:
            raise wsme.exc.ClientSideError("This operation requires a "
                                           "storage backend to be specified.")

        if backend not in constants.SB_SUPPORTED:
            raise wsme.exc.ClientSideError("Supplied storage backend (%s) is "
                                           "not supported." % backend)

        name = storage_backend_dict.get('name')
        if not name:
            # Get the list of backends of this type. If none are present, then
            # this is the system default backend for this type. Therefore use
            # the default name.
            backend_list = \
                pecan.request.dbapi.storage_backend_get_list_by_type(
                    backend_type=backend)
            if not backend_list:
                storage_backend_dict['name'] = constants.SB_DEFAULT_NAMES[
                    backend]
            else:
                raise wsme.exc.ClientSideError(
                    "This operation requires storage "
                    "backend name to be specified.")

    @staticmethod
    def common_checks(operation, storage_backend_dict):
        backend = SBApiHelper.validate_backend(storage_backend_dict)

        backend_type = storage_backend_dict['backend']
        backend_name = storage_backend_dict['name']

        try:
            existing_backend = pecan.request.dbapi.storage_backend_get_by_name(
                backend_name)
        except exception.StorageBackendNotFoundByName:
            existing_backend = None

        # The "shared_services" of an external backend can't have any internal
        # backend, vice versa. Note: This code needs to be revisited when
        # "non_shared_services" external backend (e.g. emc) is added into
        # storage-backend.
        if operation in [
                constants.SB_API_OP_CREATE, constants.SB_API_OP_MODIFY]:
            current_bk_svcs = []
            backends = pecan.request.dbapi.storage_backend_get_list()
            for bk in backends:
                if backend_type == constants.SB_TYPE_EXTERNAL:
                    if bk.as_dict()['backend'] != backend_type:
                        current_bk_svcs += \
                            SBApiHelper.getListFromServices(bk.as_dict())
                else:
                    if bk.as_dict()['backend'] == constants.SB_TYPE_EXTERNAL:
                        current_bk_svcs += \
                            SBApiHelper.getListFromServices(bk.as_dict())

            new_bk_svcs = SBApiHelper.getListFromServices(storage_backend_dict)
            for svc in new_bk_svcs:
                if svc in current_bk_svcs:
                    raise wsme.exc.ClientSideError("Service (%s) already has "
                                                   "a backend." % svc)

        # Deny any change while a backend is configuring
        backends = pecan.request.dbapi.storage_backend_get_list()
        for bk in backends:
            if bk['state'] == constants.SB_STATE_CONFIGURING:
                msg = _("%s backend is configuring, please wait for "
                        "current operation to complete before making "
                        "changes.") % bk['backend'].title()
                raise wsme.exc.ClientSideError(msg)

        if not existing_backend:
            existing_backends_by_type = set(bk['backend'] for bk in backends)

            if (backend_type in existing_backends_by_type and
                    backend_type not in [
                        constants.SB_TYPE_CEPH,
                        constants.SB_TYPE_CEPH_EXTERNAL]):
                msg = _("Only one %s backend is supported.") % backend_type
                raise wsme.exc.ClientSideError(msg)

            elif (backend_type != constants.SB_TYPE_CEPH_EXTERNAL and
                    backend_type not in existing_backends_by_type and
                    backend_name != constants.SB_DEFAULT_NAMES[backend_type]):
                msg = _("The primary {} backend must use the "
                        "default name: {}.").format(
                    backend_type,
                    constants.SB_DEFAULT_NAMES[backend_type])
                raise wsme.exc.ClientSideError(msg)

        # Deny operations with a single, unlocked, controller.
        # TODO(oponcea): Remove this once sm supports in-service config reload
        ctrls = objects.Host.list(pecan.request.context,
                                  filters={'personality': k_host.CONTROLLER})
        if len(ctrls) == 1:
            if ctrls[0].administrative == k_host.ADMIN_UNLOCKED:
                if get_system_mode() == constants.SYSTEM_MODE_SIMPLEX:
                    msg = _("Storage backend operations require controller "
                            "host to be locked.")
                else:
                    msg = _("Storage backend operations require "
                            "both controllers to be enabled and available.")
                raise wsme.exc.ClientSideError(msg)
        else:
            for ctrl in ctrls:
                if ctrl.availability not in [k_host.AVAILABILITY_AVAILABLE,
                                             k_host.AVAILABILITY_DEGRADED]:
                    msg = _("Storage backend operations require "
                            "both controllers "
                            "to be enabled and available/degraded.")
                    raise wsme.exc.ClientSideError(msg)

        if existing_backend and operation == constants.SB_API_OP_CREATE:
            if (existing_backend.state == constants.SB_STATE_CONFIGURED or
                    existing_backend.state == constants.SB_STATE_CONFIG_ERR):
                msg = (
                    _("Initial (%s) backend was previously created. Use the "
                      "modify API for further provisioning or supply a unique "
                      "name to add an additional backend.") %
                    existing_backend.name)
                raise wsme.exc.ClientSideError(msg)
        elif not existing_backend and operation == constants.SB_API_OP_MODIFY:
            raise wsme.exc.ClientSideError(
                "Attempting to modify non-existent (%s) backend." % backend)

    @staticmethod
    def set_backend_data(requested, defaults, checks, supported_svcs,
                         current=None):
        """Returns a valid backend dictionary based on current inputs

        :param requested: data from the API
        :param defaults: values that should be set if missing or
                         not currently set
        :param checks: a set of valid data to be mapped into the
                       backend capabilities
        :param supported_svcs: services that are allowed to be used
                               with this backend
        :param current: the existing view of this data (typically from the DB)
|
||||||
|
"""
|
||||||
|
if current:
|
||||||
|
merged = current.copy()
|
||||||
|
else:
|
||||||
|
merged = requested.copy()
|
||||||
|
|
||||||
|
# go through the requested values
|
||||||
|
for key in requested:
|
||||||
|
if key in merged and merged[key] != requested[key]:
|
||||||
|
merged[key] = requested[key]
|
||||||
|
|
||||||
|
# Set existing defaults
|
||||||
|
for key in merged:
|
||||||
|
if merged[key] is None and key in defaults:
|
||||||
|
merged[key] = defaults[key]
|
||||||
|
|
||||||
|
# Add the missing defaults
|
||||||
|
for key in defaults:
|
||||||
|
if key not in merged:
|
||||||
|
merged[key] = defaults[key]
|
||||||
|
|
||||||
|
# Pop the current set of data and make sure only supported parameters
|
||||||
|
# are populated
|
||||||
|
hiera_data = merged.pop('capabilities', {})
|
||||||
|
merged['capabilities'] = {}
|
||||||
|
|
||||||
|
merged_hiera_data = defaults.pop('capabilities', {})
|
||||||
|
merged_hiera_data.update(hiera_data)
|
||||||
|
|
||||||
|
for key in merged_hiera_data:
|
||||||
|
if key in checks['backend']:
|
||||||
|
merged['capabilities'][key] = merged_hiera_data[key]
|
||||||
|
continue
|
||||||
|
for svc in supported_svcs:
|
||||||
|
if key in checks[svc]:
|
||||||
|
merged['capabilities'][key] = merged_hiera_data[key]
|
||||||
|
|
||||||
|
return merged
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def check_minimal_number_of_controllers(min_number):
|
||||||
|
chosts = pecan.request.dbapi.host_get_by_personality(
|
||||||
|
k_host.CONTROLLER)
|
||||||
|
|
||||||
|
if len(chosts) < min_number:
|
||||||
|
raise wsme.exc.ClientSideError(
|
||||||
|
"This operation requires %s controllers provisioned." %
|
||||||
|
min_number)
|
||||||
|
|
||||||
|
for chost in chosts:
|
||||||
|
if chost.invprovision != k_host.PROVISIONED:
|
||||||
|
raise wsme.exc.ClientSideError(
|
||||||
|
"This operation requires %s controllers provisioned." %
|
||||||
|
min_number)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getListFromServices(be_dict):
|
||||||
|
return [] if be_dict['services'] is None \
|
||||||
|
else be_dict['services'].split(',')
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def setServicesFromList(be_dict, svc_list):
|
||||||
|
be_dict['services'] = ','.join(svc_list)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def is_svc_enabled(sb_list, svc):
|
||||||
|
for b in sb_list:
|
||||||
|
if b.services:
|
||||||
|
if svc in b.services:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def enable_backend(sb, backend_enable_function):
|
||||||
|
"""In-service enable storage backend """
|
||||||
|
try:
|
||||||
|
# Initiate manifest application
|
||||||
|
LOG.info(_("Initializing configuration of storage %s backend.") %
|
||||||
|
sb.backend.title())
|
||||||
|
backend_enable_function(pecan.request.context)
|
||||||
|
LOG.info("Configuration of storage %s backend initialized, "
|
||||||
|
"continuing in background." % sb.backend.title())
|
||||||
|
except exception.InventoryException:
|
||||||
|
LOG.exception("Manifests failed!")
|
||||||
|
# Set lvm backend to error so that it can be recreated
|
||||||
|
values = {'state': constants.SB_STATE_CONFIG_ERR, 'task': None}
|
||||||
|
pecan.request.dbapi.storage_backend_update(sb.uuid, values)
|
||||||
|
msg = (_("%s configuration failed, check node status and retry. "
|
||||||
|
"If problem persists contact next level of support.") %
|
||||||
|
sb.backend.title())
|
||||||
|
raise wsme.exc.ClientSideError(msg)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def is_primary_ceph_tier(name_string):
|
||||||
|
"""Check if a tier name string is for the primary ceph tier. """
|
||||||
|
if name_string == constants.SB_TIER_DEFAULT_NAMES[
|
||||||
|
constants.SB_TYPE_CEPH]:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def is_primary_ceph_backend(name_string):
|
||||||
|
"""Check if a backend name string is for the primary ceph backend. """
|
||||||
|
if name_string == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
@contextlib.contextmanager
|
||||||
|
def save_and_reraise_exception():
|
||||||
|
"""Save current exception, run some code and then re-raise.
|
||||||
|
|
||||||
|
In some cases the exception context can be cleared, resulting in None
|
||||||
|
being attempted to be re-raised after an exception handler is run. This
|
||||||
|
can happen when eventlet switches greenthreads or when running an
|
||||||
|
exception handler, code raises and catches an exception. In both
|
||||||
|
cases the exception context will be cleared.
|
||||||
|
|
||||||
|
To work around this, we save the exception state, run handler code, and
|
||||||
|
then re-raise the original exception. If another exception occurs, the
|
||||||
|
saved exception is logged and the new exception is re-raised.
|
||||||
|
"""
|
||||||
|
type_, value, tb = sys.exc_info()
|
||||||
|
try:
|
||||||
|
yield
|
||||||
|
except Exception:
|
||||||
|
LOG.error(_('Original exception being dropped: %s'),
|
||||||
|
traceback.format_exception(type_, value, tb))
|
||||||
|
raise
|
||||||
|
raise (type_, value, tb)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_port(host_name, port_name):
|
||||||
|
hosts = cgtsclient(pecan.request.context).ihost.list()
|
||||||
|
for h in hosts:
|
||||||
|
if h.hostname == host_name:
|
||||||
|
ports = cgtsclient(pecan.request.context).port.list(h.uuid)
|
||||||
|
for p in ports:
|
||||||
|
if p.name == port_name:
|
||||||
|
return p
|
||||||
|
return None
|
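For illustration, a minimal sketch of the merge behaviour implemented by SBApiHelper.set_backend_data() above, assuming SBApiHelper is importable. The backend, service and capability values are made up for the example; requested values win, defaults fill the gaps, and only capability keys named in the checks survive:

    # Hypothetical inputs (not taken from the real API):
    requested = {'backend': 'file', 'services': None,
                 'capabilities': {'fs_size': '20'}}
    defaults = {'services': 'glance',
                'capabilities': {'fs_size': '10', 'unrelated': 'x'}}
    checks = {'backend': ['fs_size'], 'glance': []}

    merged = SBApiHelper.set_backend_data(requested, defaults, checks,
                                          supported_svcs=['glance'])
    # merged['services'] == 'glance'               (None filled from defaults)
    # merged['capabilities'] == {'fs_size': '20'}  (requested wins; 'unrelated'
    #                                               is filtered by the checks)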
66
inventory/inventory/inventory/api/controllers/v1/versions.py
Normal file
@ -0,0 +1,66 @@
# Copyright (c) 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from oslo_config import cfg

CONF = cfg.CONF

# This is the version 1 API
BASE_VERSION = 1

# Here goes a short log of changes in every version.
# Refer to doc/source/dev/webapi-version-history.rst for a detailed explanation
# of what each version contains.
#
# v1.0: corresponds to Initial API

MINOR_0_INITIAL_VERSION = 0

# When adding another version, update:
# - MINOR_MAX_VERSION
# - doc/source/contributor/webapi-version-history.rst with a detailed
#   explanation of what changed in the new version
# - common/release_mappings.py, RELEASE_MAPPING['master']['api']

MINOR_MAX_VERSION = MINOR_0_INITIAL_VERSION

# String representations of the minimum and maximum versions
_MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_0_INITIAL_VERSION)
_MAX_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_MAX_VERSION)


def min_version_string():
    """Returns the minimum supported API version (as a string)."""
    return _MIN_VERSION_STRING


def max_version_string():
    """Returns the maximum supported API version (as a string).

    If the service is pinned, the maximum API version is the pinned
    version. Otherwise, it is the maximum supported API version.
    """

    # TODO(jkung): enable when release versions supported
    # release_ver = release_mappings.RELEASE_MAPPING.get(
    #     CONF.pin_release_version)
    # if release_ver:
    #     return release_ver['api']
    # else:
    return _MAX_VERSION_STRING
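With BASE_VERSION = 1 and only minor version 0 defined, both helpers currently resolve to the same string; a quick illustration grounded in the constants above:

    from inventory.api.controllers.v1 import versions

    assert versions.min_version_string() == '1.0'
    assert versions.max_version_string() == '1.0'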
110
inventory/inventory/inventory/api/hooks.py
Normal file
@ -0,0 +1,110 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from inventory.common import context
from inventory.common.i18n import _
from inventory.conductor import rpcapi
from inventory.db import api as dbapi
from inventory.systemconfig import plugin as systemconfig_plugin
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from pecan import hooks
import webob

CONF = cfg.CONF
LOG = log.getLogger(__name__)


class ContextHook(hooks.PecanHook):
    """Configures a request context and attaches it to the request.

    The following HTTP request headers are used:

    X-User-Name:
        Used for context.user_name.

    X-User-Id:
        Used for context.user_id.

    X-Project-Name:
        Used for context.project.

    X-Project-Id:
        Used for context.project_id.

    X-Auth-Token:
        Used for context.auth_token.

    X-Roles:
        Used for context.roles.

    X-Service-Catalog:
        Used for context.service_catalog.
    """

    def before(self, state):
        headers = state.request.headers
        environ = state.request.environ
        user_name = headers.get('X-User-Name')
        user_id = headers.get('X-User-Id')
        project = headers.get('X-Project-Name')
        project_id = headers.get('X-Project-Id')
        domain_id = headers.get('X-User-Domain-Id')
        domain_name = headers.get('X-User-Domain-Name')
        auth_token = headers.get('X-Auth-Token')
        roles = headers.get('X-Roles', '').split(',')
        catalog_header = headers.get('X-Service-Catalog')
        service_catalog = None
        if catalog_header:
            try:
                service_catalog = jsonutils.loads(catalog_header)
            except ValueError:
                raise webob.exc.HTTPInternalServerError(
                    _('Invalid service catalog json.'))

        auth_token_info = environ.get('keystone.token_info')
        auth_url = CONF.keystone_authtoken.auth_uri

        state.request.context = context.make_context(
            auth_token=auth_token,
            auth_url=auth_url,
            auth_token_info=auth_token_info,
            user_name=user_name,
            user_id=user_id,
            project_name=project,
            project_id=project_id,
            domain_id=domain_id,
            domain_name=domain_name,
            roles=roles,
            service_catalog=service_catalog
        )


class DBHook(hooks.PecanHook):
    """Attach the dbapi object to the request so controllers can get to it."""

    def before(self, state):
        state.request.dbapi = dbapi.get_instance()


class RPCHook(hooks.PecanHook):
    """Attach the rpcapi object to the request so controllers can get to it."""

    def before(self, state):
        state.request.rpcapi = rpcapi.ConductorAPI()


class SystemConfigHook(hooks.PecanHook):
    """Attach the systemconfig plugin to the request so controllers can use it."""

    def before(self, state):
        state.request.systemconfig = systemconfig_plugin.SystemConfigPlugin(
            invoke_kwds={'context': state.request.context})

        # state.request.systemconfig = systemconfig.SystemConfigOperator(
        #     state.request.context,
        #     state.request.dbapi)
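These hooks are consumed when the Pecan application is assembled (the app wiring lands with the rest of this commit). A minimal sketch of the typical registration, with the root controller path assumed rather than taken from this diff; ContextHook must be listed before SystemConfigHook because the latter reads state.request.context:

    import pecan

    from inventory.api import hooks

    app_hooks = [hooks.ContextHook(),
                 hooks.DBHook(),
                 hooks.RPCHook(),
                 hooks.SystemConfigHook()]

    # 'inventory.api.controllers.root.RootController' is an assumed path.
    app = pecan.make_app('inventory.api.controllers.root.RootController',
                         hooks=app_hooks)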
19
inventory/inventory/inventory/api/middleware/__init__.py
Normal file
@ -0,0 +1,19 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from inventory.api.middleware import auth_token
from inventory.api.middleware import parsable_error
# from inventory.api.middleware import json_ext


ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware
AuthTokenMiddleware = auth_token.AuthTokenMiddleware
# JsonExtensionMiddleware = json_ext.JsonExtensionMiddleware

__all__ = ('ParsableErrorMiddleware',
           'AuthTokenMiddleware')

#          'JsonExtensionMiddleware')
75
inventory/inventory/inventory/api/middleware/auth_token.py
Normal file
@ -0,0 +1,75 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


import re

from keystonemiddleware import auth_token
from oslo_log import log

from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import utils

LOG = log.getLogger(__name__)


class AuthTokenMiddleware(auth_token.AuthProtocol):
    """A wrapper on Keystone auth_token middleware.

    Does not perform verification of authentication tokens
    for public routes in the API.

    """
    def __init__(self, app, conf, public_api_routes=None):
        if public_api_routes is None:
            public_api_routes = []
        route_pattern_tpl = r'%s(\.json)?$'

        try:
            self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl)
                                      for route_tpl in public_api_routes]
        except re.error as e:
            msg = _('Cannot compile public API routes: %s') % e

            LOG.error(msg)
            raise exception.ConfigInvalid(error_msg=msg)

        super(AuthTokenMiddleware, self).__init__(app, conf)

    def __call__(self, env, start_response):
        path = utils.safe_rstrip(env.get('PATH_INFO'), '/')

        # Some other components need to know whether the call is being made
        # against the public API, so save that flag in the WSGI environment.
        env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path),
                                       self.public_api_routes))

        if env['is_public_api']:
            return self._app(env, start_response)

        return super(AuthTokenMiddleware, self).__call__(env, start_response)

    @classmethod
    def factory(cls, global_config, **local_conf):
        public_routes = local_conf.get('acl_public_routes', '')
        public_api_routes = [path.strip() for path in public_routes.split(',')]

        def _factory(app):
            return cls(app, global_config, public_api_routes=public_api_routes)
        return _factory
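The route template means each configured public route also matches its '.json' variant, and nothing below it. A small illustration of the matching, using '/v1' as an assumed acl_public_routes entry:

    from __future__ import print_function

    import re

    pattern = re.compile(r'%s(\.json)?$' % '/v1')

    for path in ('/v1', '/v1.json', '/v1/hosts'):
        print(path, bool(re.match(pattern, path)))
    # /v1 True
    # /v1.json True
    # /v1/hosts False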
99
inventory/inventory/inventory/api/middleware/parsable_error.py
Normal file
@ -0,0 +1,99 @@
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Middleware to replace the plain text message body of an error
response with one formatted so the client can parse it.

Based on pecan.middleware.errordocument
"""

import json
from xml import etree as et

from oslo_log import log
import six
import webob

from inventory.common.i18n import _

LOG = log.getLogger(__name__)


class ParsableErrorMiddleware(object):
    """Replace error body with something the client can parse."""
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # Request for this state, modified by replace_start_response()
        # and used when an error is being reported.
        state = {}

        def replacement_start_response(status, headers, exc_info=None):
            """Overrides the default response to make errors parsable."""
            try:
                status_code = int(status.split(' ')[0])
                state['status_code'] = status_code
            except (ValueError, TypeError):  # pragma: nocover
                raise Exception(_(
                    'ErrorDocumentMiddleware received an invalid '
                    'status %s') % status)
            else:
                if (state['status_code'] // 100) not in (2, 3):
                    # Remove some headers so we can replace them later
                    # when we have the full error message and can
                    # compute the length.
                    headers = [(h, v)
                               for (h, v) in headers
                               if h not in ('Content-Length', 'Content-Type')
                               ]
                # Save the headers in case we need to modify them.
                state['headers'] = headers
                return start_response(status, headers, exc_info)

        # The default is application/json. However, Pecan will try
        # to output HTML errors if no Accept header is provided.
        if 'HTTP_ACCEPT' not in environ or environ['HTTP_ACCEPT'] == '*/*':
            environ['HTTP_ACCEPT'] = 'application/json'

        app_iter = self.app(environ, replacement_start_response)
        if (state['status_code'] // 100) not in (2, 3):
            req = webob.Request(environ)
            if (req.accept.best_match(
                    ['application/json', 'application/xml']) ==
                    'application/xml'):
                try:
                    # simple check xml is valid
                    body = [et.ElementTree.tostring(
                        et.ElementTree.fromstring('<error_message>' +
                                                  '\n'.join(app_iter) +
                                                  '</error_message>'))]
                except et.ElementTree.ParseError as err:
                    LOG.error('Error parsing HTTP response: %s', err)
                    body = ['<error_message>%s' % state['status_code'] +
                            '</error_message>']
                state['headers'].append(('Content-Type', 'application/xml'))
            else:
                if six.PY3:
                    app_iter = [i.decode('utf-8') for i in app_iter]
                body = [json.dumps({'error_message': '\n'.join(app_iter)})]
                if six.PY3:
                    body = [item.encode('utf-8') for item in body]
                state['headers'].append(('Content-Type', 'application/json'))
            state['headers'].append(('Content-Length', str(len(body[0]))))
        else:
            body = app_iter
        return body
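For a JSON client the net effect is that any non-2xx/3xx response body is wrapped in a single 'error_message' field. A sketch of the transformation, with the upstream error text assumed:

    import json

    app_iter = ['Resource could not be found']  # assumed upstream error body

    # What ParsableErrorMiddleware hands back to the client instead:
    body = json.dumps({'error_message': '\n'.join(app_iter)})
    print(body)  # {"error_message": "Resource could not be found"}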
31
inventory/inventory/inventory/cmd/__init__.py
Normal file
@ -0,0 +1,31 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import os

os.environ['EVENTLET_NO_GREENDNS'] = 'yes'  # noqa E402

import eventlet

eventlet.monkey_patch(os=False)

import oslo_i18n as i18n  # noqa I202

i18n.install('inventory')
58
inventory/inventory/inventory/cmd/agent.py
Normal file
@ -0,0 +1,58 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
The Inventory Agent Service
"""

import sys

from oslo_config import cfg
from oslo_log import log
from oslo_service import service

from inventory.common import rpc_service
from inventory.common import service as inventory_service


CONF = cfg.CONF

LOG = log.getLogger(__name__)


def main():
    # Parse config file and command line options, then start logging
    inventory_service.prepare_service(sys.argv)

    # connection is based upon host and MANAGER_TOPIC
    mgr = rpc_service.RPCService(CONF.host,
                                 'inventory.agent.manager',
                                 'AgentManager')
    launcher = service.launch(CONF, mgr)
    launcher.wait()


if __name__ == '__main__':
    sys.exit(main())
86
inventory/inventory/inventory/cmd/api.py
Normal file
@ -0,0 +1,86 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


import sys

import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import systemd
from oslo_service import wsgi

import logging as std_logging

from inventory.api import app
from inventory.api import config
from inventory.common.i18n import _

api_opts = [
    cfg.StrOpt('bind_host',
               default="0.0.0.0",
               help=_('IP address for inventory api to listen')),
    cfg.IntOpt('bind_port',
               default=6380,
               help=_('listen port for inventory api')),
    cfg.StrOpt('bind_host_pxe',
               default="0.0.0.0",
               help=_('IP address for inventory api pxe to listen')),
    cfg.IntOpt('api_workers', default=2,
               help=_("number of api workers")),
    cfg.IntOpt('limit_max',
               default=1000,
               help='the maximum number of items returned in a single '
                    'response from a collection resource')
]


CONF = cfg.CONF


LOG = logging.getLogger(__name__)
eventlet.monkey_patch(os=False)


def main():

    config.init(sys.argv[1:])
    config.setup_logging()

    application = app.load_paste_app()

    CONF.register_opts(api_opts, 'api')

    host = CONF.api.bind_host
    port = CONF.api.bind_port
    workers = CONF.api.api_workers

    if workers < 1:
        LOG.warning("Wrong worker number, worker = %(workers)s",
                    {'workers': workers})
        workers = 1

    LOG.info("Serving on http://%(host)s:%(port)s with %(workers)s workers",
             {'host': host, 'port': port, 'workers': workers})
    systemd.notify_once()
    service = wsgi.Server(CONF, CONF.prog, application, host, port)

    app.serve(service, CONF, workers)

    pxe_host = CONF.api.bind_host_pxe
    if pxe_host:
        pxe_service = wsgi.Server(CONF, CONF.prog, application, pxe_host, port)
        app.serve_pxe(pxe_service, CONF, 1)

    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    app.wait()
    if pxe_host:
        app.wait_pxe()


if __name__ == '__main__':
    main()
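The options above are registered into the [api] group via CONF.register_opts(api_opts, 'api'). A sample configuration fragment mirroring the defaults; the file location is assumed, not defined in this diff:

    # /etc/inventory/inventory.conf (assumed location)
    [api]
    bind_host = 0.0.0.0
    bind_port = 6380
    bind_host_pxe = 0.0.0.0
    api_workers = 2
    limit_max = 1000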
55
inventory/inventory/inventory/cmd/conductor.py
Normal file
@ -0,0 +1,55 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
The Inventory Conductor Service
"""

import sys

from oslo_config import cfg
from oslo_log import log
from oslo_service import service

from inventory.common import rpc_service
from inventory.common import service as inventory_service

CONF = cfg.CONF
LOG = log.getLogger(__name__)


def main():
    # Parse config file and command line options, then start logging
    inventory_service.prepare_service(sys.argv)

    mgr = rpc_service.RPCService(CONF.host,
                                 'inventory.conductor.manager',
                                 'ConductorManager')

    launcher = service.launch(CONF, mgr)
    launcher.wait()


if __name__ == '__main__':
    sys.exit(main())
19
inventory/inventory/inventory/cmd/dbsync.py
Normal file
@ -0,0 +1,19 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


from oslo_config import cfg
import sys

from inventory.db import migration

CONF = cfg.CONF


def main():
    cfg.CONF(sys.argv[1:],
             project='inventory')
    migration.db_sync()
133
inventory/inventory/inventory/cmd/dnsmasq_lease_update.py
Executable file
@ -0,0 +1,133 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#


"""
Handle lease database updates from dnsmasq DHCP server
This file was based on dhcpbridge.py from nova
"""

from __future__ import print_function

import os
import sys

from inventory.common import context
from inventory.common.i18n import _
from inventory.common import service as inventory_service
from inventory.conductor import rpcapi as conductor_rpcapi

from oslo_config import cfg
from oslo_log import log

CONF = cfg.CONF


def add_lease(mac, ip_address):
    """Called when a new lease is created."""

    ctxt = context.get_admin_context()
    rpcapi = \
        conductor_rpcapi.ConductorAPI(topic=conductor_rpcapi.MANAGER_TOPIC)

    cid = None
    cid = os.getenv('DNSMASQ_CLIENT_ID')

    tags = None
    tags = os.getenv('DNSMASQ_TAGS')

    if tags is not None:
        # TODO(sc): Maybe this shouldn't be synchronous - if this hangs,
        # we could cause dnsmasq to get stuck...
        rpcapi.handle_dhcp_lease(ctxt, tags, mac, ip_address, cid)


def old_lease(mac, ip_address):
    """Called when an old lease is recognized."""

    # This happens when a node is rebooted, but it can also happen if the
    # node was deleted and then rebooted, so we need to re-add in that case.

    ctxt = context.get_admin_context()
    rpcapi = conductor_rpcapi.ConductorAPI(
        topic=conductor_rpcapi.MANAGER_TOPIC)

    cid = None
    cid = os.getenv('DNSMASQ_CLIENT_ID')

    tags = None
    tags = os.getenv('DNSMASQ_TAGS')

    if tags is not None:
        # TODO(sc): Maybe this shouldn't be synchronous - if this hangs,
        # we could cause dnsmasq to get stuck...
        rpcapi.handle_dhcp_lease(ctxt, tags, mac, ip_address, cid)


def del_lease(mac, ip_address):
    """Called when a lease expires."""
    # We will only delete the ihost when it is requested by the user.
    pass


def add_action_parsers(subparsers):
    # NOTE(cfb): dnsmasq always passes mac, and ip. hostname
    #            is passed if known. We don't care about
    #            hostname, but argparse will complain if we
    #            do not accept it.
    for action in ['add', 'del', 'old']:
        parser = subparsers.add_parser(action)
        parser.add_argument('mac')
        parser.add_argument('ip')
        parser.add_argument('hostname', nargs='?', default='')
        parser.set_defaults(func=globals()[action + '_lease'])


CONF.register_cli_opt(
    cfg.SubCommandOpt('action',
                      title='Action options',
                      help='Available dnsmasq_lease_update options',
                      handler=add_action_parsers))


def main():
    # Parse config file and command line options, then start logging.
    # The mac is to be truncated to 17 characters, which is the proper
    # length of a mac address, in order to handle IPv6 where a DUID
    # is provided instead of a mac address. The truncated DUID is
    # then equivalent to the mac address.
    inventory_service.prepare_service(sys.argv)

    LOG = log.getLogger(__name__)

    if CONF.action.name in ['add', 'del', 'old']:
        msg = (_("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") %
               {"action": CONF.action.name,
                "mac": CONF.action.mac[-17:],
                "ip": CONF.action.ip})
        LOG.info(msg)
        CONF.action.func(CONF.action.mac[-17:], CONF.action.ip)
    else:
        LOG.error(_("Unknown action: %(action)s") % {"action":
                                                     CONF.action.name})
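The mac[-17:] slicing in main() is the DUID handling described in the comment: a colon-separated MAC address is exactly 17 characters, so for IPv6 only the trailing MAC-sized portion of the DUID is kept. A small illustration with a made-up DUID-LL value:

    duid = '00:03:00:01:08:00:27:aa:bb:cc'  # assumed DHCPv6 DUID-LL
    print(duid[-17:])  # 08:00:27:aa:bb:cc -- the embedded MAC address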
0
inventory/inventory/inventory/common/__init__.py
Normal file
43
inventory/inventory/inventory/common/base.py
Normal file
@ -0,0 +1,43 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


from oslo_log import log
LOG = log.getLogger(__name__)


class APIResourceWrapper(object):
    """Simple wrapper for api objects.

    Define _attrs on the child class and pass in the
    api object as the only argument to the constructor
    """
    _attrs = []
    _apiresource = None  # Make sure _apiresource is there even in __init__.

    def __init__(self, apiresource):
        self._apiresource = apiresource

    def __getattribute__(self, attr):
        try:
            return object.__getattribute__(self, attr)
        except AttributeError:
            if attr not in self._attrs:
                raise
            # __getattr__ won't find properties
            return getattr(self._apiresource, attr)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__,
                             dict((attr, getattr(self, attr))
                                  for attr in self._attrs
                                  if hasattr(self, attr)))

    def as_dict(self):
        obj = {}
        for key in self._attrs:
            obj[key] = getattr(self._apiresource, key, None)
        return obj
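A minimal usage sketch of APIResourceWrapper; the Port subclass and the wrapped object are hypothetical:

    from inventory.common import base


    class Port(base.APIResourceWrapper):
        # Only attributes listed here are proxied to the wrapped object.
        _attrs = ['uuid', 'name']


    class FakeApiPort(object):  # stand-in for a real API resource
        uuid = '123'
        name = 'eth0'
        speed = 1000  # not in _attrs, so not exposed through the wrapper


    port = Port(FakeApiPort())
    print(port.name)               # eth0
    print(port.as_dict())          # {'uuid': '123', 'name': 'eth0'}
    print(hasattr(port, 'speed'))  # False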
Some files were not shown because too many files have changed in this diff.