Merge remote-tracking branch 'gerrit/master' into f/centos76

Change-Id: I1c4ce76f1870b085b7f8ec39d4e3ae0c9b401a2d
Signed-off-by: Saul Wold <sgw@linux.intel.com>
commit 17b864fbeb
@@ -1 +0,0 @@
TIS_PATCH_VER=2
@@ -1,25 +0,0 @@
From a15d83975ed19367767e18354ea07cd5d281e265 Mon Sep 17 00:00:00 2001
From: Don Penney <don.penney@windriver.com>
Date: Tue, 27 Sep 2016 10:14:02 -0400
Subject: [PATCH] Update package versioning for TIS format

---
 SPECS/crontabs.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/SPECS/crontabs.spec b/SPECS/crontabs.spec
index 7001d1a..4512bcc 100644
--- a/SPECS/crontabs.spec
+++ b/SPECS/crontabs.spec
@@ -2,7 +2,7 @@
 Summary: Root crontab files used to schedule the execution of programs
 Name: crontabs
 Version: 1.11
-Release: 6.%{snap_release}%{?dist}
+Release: 6.%{snap_release}.el7%{?_tis_dist}.%{tis_patch_ver}
 License: Public Domain and GPLv2
 Group: System Environment/Base
 URL: https://fedorahosted.org/crontabs
--
1.8.3.1

@@ -1,2 +0,0 @@
spec-add-patch-to-remove-printing-of-motd-script-nam.patch
0001-Update-package-versioning-for-TIS-format.patch
@@ -1,35 +0,0 @@
From 9221bd11aec1590df2dc3f19e9a582d76ed7adc0 Mon Sep 17 00:00:00 2001
From: Michel Thebeau <michel.thebeau@windriver.com>
Date: Mon, 25 Jul 2016 11:45:55 -0400
Subject: [PATCH] spec: add patch to remove printing of motd script name

Signed-off-by: Michel Thebeau <michel.thebeau@windriver.com>
---
 SPECS/crontabs.spec | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/SPECS/crontabs.spec b/SPECS/crontabs.spec
index d6a8c69..90098e1 100644
--- a/SPECS/crontabs.spec
+++ b/SPECS/crontabs.spec
@@ -10,6 +10,8 @@ Source0: https://fedorahosted.org/releases/c/r/crontabs/%{name}-%{version}-1.%{s
 BuildArch: noarch
 Requires: /etc/cron.d
 
+Patch1: run-parts-add-option-to-remove-printing-of-motd-scri.patch
+
 %description
 This package is used by Fedora mainly for executing files by cron.
 
@@ -25,6 +27,8 @@ your system.
 %prep
 %setup -q
 
+%patch1 -p1
+
 %build
 #empty
 
--
1.8.3.1

@@ -1,65 +0,0 @@
From f0bd54cb83ba430ef81153c7a6da2a52daca5266 Mon Sep 17 00:00:00 2001
From: Michel Thebeau <michel.thebeau@windriver.com>
Date: Mon, 25 Jul 2016 11:23:18 -0400
Subject: [PATCH] run-parts: add option to remove printing of motd script name

The awk statement seems to be a round-about way of printing the name of
the motd script (/etc/motd.d/*). The pipe seems to allow awk to print
without user input; while the end of input causes awk to exit. Any
input to awk is echoed to terminal before the motd script name is
printed.

The script name that is printed is appended to /etc/motd. This is
undesirable. Add an option to skip the awk program.

Signed-off-by: Michel Thebeau <michel.thebeau@windriver.com>
---
 run-parts | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/run-parts b/run-parts
index 7e148f8..b444f4e 100755
--- a/run-parts
+++ b/run-parts
@@ -4,6 +4,14 @@
 # keep going when something fails
 set +e
 
+# First parameter to remove printing of the names of executed scripts.
+# The default is unmodified behaviour, print the name of scripts.
+with_progname="y"
+if [ "$1" == "--without-progname" ]; then
+    with_progname=""
+    shift
+fi
+
 if [ $# -lt 1 ]; then
     echo "Usage: run-parts [--list | --test] <dir>"
     exit 1
@@ -87,12 +95,17 @@ for i in $(LC_ALL=C; echo ${1%/}/*[^~,]) ; do
 
     # run executable files
     logger -p cron.notice -t "run-parts($1)[$$]" "starting $(basename $i)"
-    $i 2>&1 | awk -v "progname=$i" \
-        'progname {
-            print progname ":\n"
-            progname="";
-        }
-        { print; }'
+    if [ -n "$with_progname" ]; then
+        $i 2>&1 | awk -v "progname=$i" \
+            'progname {
+                print progname ":\n"
+                progname="";
+            }
+            { print; }'
+    else
+        $i 2>&1
+    fi
+
     logger -i -p cron.notice -t "run-parts($1)" "finished $(basename $i)"
     fi
 fi
--
1.8.3.1

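For reference, the behaviour the deleted patch describes can be seen in isolation. The sketch below is illustrative only: the /etc/motd.d/10-uptime path is a hypothetical example, and the awk wrapper is the one quoted from run-parts in the patch above.

    i=/etc/motd.d/10-uptime     # hypothetical motd fragment script
    # Default run-parts behaviour: the awk wrapper prints the script's name
    # ahead of its output, so the name also ends up appended to /etc/motd.
    $i 2>&1 | awk -v "progname=$i" \
        'progname {
            print progname ":\n"
            progname="";
        }
        { print; }'
    # With the patched run-parts, the wrapper could be skipped so that only
    # the script output reaches the motd file:
    #   run-parts --without-progname /etc/motd.d 1>/etc/motd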
@@ -1 +0,0 @@
mirror:Source/crontabs-1.11-6.20121102git.el7.src.rpm
base/golang-dep/centos/build_srpm.data (new file)
@@ -0,0 +1,2 @@
COPY_LIST="$CGCS_BASE/downloads/dep-v0.5.0.tar.gz"
TIS_PATCH_VER=0
base/golang-dep/centos/golang-dep.spec (new file)
@@ -0,0 +1,45 @@
Name: golang-dep
Version: 0.5.0
Release: %{tis_patch_ver}%{?_tis_dist}
Summary: Go dep dependency management tool
Group: Development/Languages
License: Go
URL: https://github.com/golang/dep
Source: dep-v0.5.0.tar.gz
BuildRequires: golang
Requires: golang

%global with_debug 0
%global debug_package %{nil}
%define __spec_install_post %{nil}

%define tooldir %{_libdir}/go/pkg/%{name}/linux_amd64

%if ! 0%{?gobuild:1}
%define gobuild(o:) go build -ldflags "${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n')" -a -v -x %{?**};
%endif

%description
This package includes additional go development tools.

%prep
%setup -T -c -n go/src/github.com/golang/dep
tar --strip-components=1 -x -f %{SOURCE0}

%build
export GOPATH=%{_builddir}/go
(cd cmd/dep && %gobuild -o dep)

%install
rm -rf %{buildroot}
install -d %{buildroot}%{_bindir}
install -d %{buildroot}%{tooldir}
install cmd/dep/dep %{buildroot}%{_bindir}

%clean
rm -rf %{buildroot}

%files
%defattr(-,root,root,-)
%{_bindir}/dep

@@ -142,9 +142,9 @@ networking/net-tools
 filesystem/drbd/drbd-tools
 database/mariadb
 database/python-psycopg2
-base/crontabs
 base/dnsmasq
 base/dnsmasq-config
+base/golang-dep
 filesystem/parted
 security/python-keyring
 grub/grub2
@@ -23,7 +23,7 @@ class CephManagerException(Exception):
     message = self.message % kwargs
 except TypeError:
     LOG.warn(_LW('Exception in string format operation'))
-    for name, value in kwargs.iteritems():
+    for name, value in kwargs.items():
         LOG.error("%s: %s" % (name, value))
         # at least get the core message out if something happened
         message = self.message
@@ -27,8 +27,8 @@ LOGFILE="/var/log/ceph-manager.log"
 start()
 {
     if [ -e $PIDFILE ]; then
-        PIDDIR=/prod/$(cat $PIDFILE)
-        if [ -d ${PIDFILE} ]; then
+        PIDDIR=/proc/$(cat $PIDFILE)
+        if [ -d ${PIDDIR} ]; then
             echo "$DESC already running."
             exit 0
         else
@@ -1,4 +1,4 @@
 COPY_LIST="$FILES_BASE/* \
            $DISTRO/patches/* \
-           $CGCS_BASE/downloads/drbd-8.4.7-1.tar.gz"
-TIS_PATCH_VER=4
+           $CGCS_BASE/downloads/drbd-8.4.11-1.tar.gz"
+TIS_PATCH_VER=0
@@ -9,7 +9,7 @@
 
 Name: drbd-kernel%{?bt_ext}
 Summary: Kernel driver for DRBD
-Version: 8.4.7
+Version: 8.4.11
 %define upstream_release 1
 Release: %{upstream_release}%{?_tis_dist}.%{tis_patch_ver}
 %global tarball_version %(echo "%{version}-%{?upstream_release}" | sed -e "s,%{?dist}$,,")
@@ -25,7 +25,6 @@ Source0: http://oss.linbit.com/drbd/drbd-%{tarball_version}.tar.gz
 
 # WRS
 Patch0001: 0001-remove_bind_before_connect_error.patch
-Patch0002: compat-Statically-initialize-families.patch
 
 %define kversion %(rpm -q kernel%{?bt_ext}-devel | sort --version-sort | tail -1 | sed 's/kernel%{?bt_ext}-devel-//')
 
@@ -86,7 +85,6 @@ echo "Done."
 %prep
 %setup -q -n drbd-%{tarball_version}
 %patch0001 -p1
-%patch0002 -p1
 
 %build
 rm -rf obj
@@ -2,7 +2,7 @@ Index: drbd-8.4.7-1/drbd/drbd_receiver.c
 ===================================================================
 --- drbd-8.4.7-1.orig/drbd/drbd_receiver.c
 +++ drbd-8.4.7-1/drbd/drbd_receiver.c
-@@ -718,6 +718,7 @@ out:
+@@ -719,6 +719,7 @@ out:
 	/* peer not (yet) available, network problem */
 	case ECONNREFUSED: case ENETUNREACH:
 	case EHOSTDOWN: case EHOSTUNREACH:
@@ -1,171 +0,0 @@
From 7510d78909774e33b64ada4055bea65881350763 Mon Sep 17 00:00:00 2001
Message-Id: <7510d78909774e33b64ada4055bea65881350763.1528136610.git.Jim.Somerville@windriver.com>
From: Nick Wang <nwang@suse.com>
Date: Mon, 13 Mar 2017 15:23:29 +0800
Subject: [PATCH 1/1] compat: Statically initialize families

In a07ea4d9, genetlink no longer use static family id.
GENL_ID_GENERATE is removed.
In 489111e5, statically initialize the families and remove
the inline functions.

Thanks to Nick Wang <nwang@suse.com> for preparing a first draft.
Unfortunately this version actually broke netlink on v4.10. Probably
only compile-tested, but never "drbdadm up" tested.

Signed-off-by: Nick Wang <nwang@suse.com>
[add missing pieces introduced in 489111e5]
Signed-off-by: Roland Kammerer <roland.kammerer@linbit.com>
[Simplified :-) and backported to drbd 8.4]
Signed-off-by: Lars Ellenberg <lars@linbit.com>

Signed-off-by: Jim Somerville <Jim.Somerville@windriver.com>
---
 .../tests/have_genl_family_in_genlmsg_multicast.c | 9 ++++++
 drbd/compat/tests/have_genl_id_generate.c | 6 ++++
 .../tests/have_genl_register_family_with_ops.c | 9 ++++++
 .../tests/have_genl_register_family_with_ops3.c | 9 ++++++
 ...gic_func-genl_register_family_with_ops_groups.h | 4 +++
 drbd/linux/genl_magic_func.h | 34 +++++++++++++++-------
 6 files changed, 61 insertions(+), 10 deletions(-)
 create mode 100644 drbd/compat/tests/have_genl_family_in_genlmsg_multicast.c
 create mode 100644 drbd/compat/tests/have_genl_id_generate.c
 create mode 100644 drbd/compat/tests/have_genl_register_family_with_ops.c
 create mode 100644 drbd/compat/tests/have_genl_register_family_with_ops3.c

diff --git a/drbd/compat/tests/have_genl_family_in_genlmsg_multicast.c b/drbd/compat/tests/have_genl_family_in_genlmsg_multicast.c
new file mode 100644
index 0000000..6d44faa
--- /dev/null
+++ b/drbd/compat/tests/have_genl_family_in_genlmsg_multicast.c
@@ -0,0 +1,9 @@
+#include <net/genetlink.h>
+
+void test(void)
+{
+    struct genl_family family = { };
+    struct sk_buff *skb = NULL;
+
+    genlmsg_multicast(&family, skb, 0, 0, GFP_KERNEL);
+}
diff --git a/drbd/compat/tests/have_genl_id_generate.c b/drbd/compat/tests/have_genl_id_generate.c
new file mode 100644
index 0000000..4ef0e8e
--- /dev/null
+++ b/drbd/compat/tests/have_genl_id_generate.c
@@ -0,0 +1,6 @@
+#include <linux/genetlink.h>
+
+void test(void)
+{
+    int i = GENL_ID_GENERATE;
+}
diff --git a/drbd/compat/tests/have_genl_register_family_with_ops.c b/drbd/compat/tests/have_genl_register_family_with_ops.c
new file mode 100644
index 0000000..27123db
--- /dev/null
+++ b/drbd/compat/tests/have_genl_register_family_with_ops.c
@@ -0,0 +1,9 @@
+#include <net/genetlink.h>
+
+void test(void)
+{
+    struct genl_family family = { };
+    struct genl_ops ops[23];
+
+    genl_register_family_with_ops(&family, ops);
+}
diff --git a/drbd/compat/tests/have_genl_register_family_with_ops3.c b/drbd/compat/tests/have_genl_register_family_with_ops3.c
new file mode 100644
index 0000000..11b6d73
--- /dev/null
+++ b/drbd/compat/tests/have_genl_register_family_with_ops3.c
@@ -0,0 +1,9 @@
+#include <net/genetlink.h>
+
+void test(void)
+{
+    struct genl_family family = { };
+    struct genl_ops ops[23];
+
+    genl_register_family_with_ops(&family, ops, 23);
+}
diff --git a/drbd/linux/genl_magic_func-genl_register_family_with_ops_groups.h b/drbd/linux/genl_magic_func-genl_register_family_with_ops_groups.h
index 27d8f73..403e8e2 100644
--- a/drbd/linux/genl_magic_func-genl_register_family_with_ops_groups.h
+++ b/drbd/linux/genl_magic_func-genl_register_family_with_ops_groups.h
@@ -29,9 +29,13 @@ static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
 
 int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
 {
+#if defined(COMPAT_HAVE_GENL_REGISTER_FAMILY_WITH_OPS) || defined(COMPAT_HAVE_GENL_REGISTER_FAMILY_WITH_OPS3)
     return genl_register_family_with_ops_groups(&ZZZ_genl_family, \
                                                 ZZZ_genl_ops, \
                                                 ZZZ_genl_mcgrps);
+#else
+    return genl_register_family(&ZZZ_genl_family);
+#endif
 }
 
 void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
diff --git a/drbd/linux/genl_magic_func.h b/drbd/linux/genl_magic_func.h
index 29f44a8..504719a 100644
--- a/drbd/linux/genl_magic_func.h
+++ b/drbd/linux/genl_magic_func.h
@@ -261,15 +261,7 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
 * {{{2
 */
 #define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
-static struct genl_family ZZZ_genl_family __read_mostly = {
-    .id = GENL_ID_GENERATE,
-    .name = __stringify(GENL_MAGIC_FAMILY),
-    .version = GENL_MAGIC_VERSION,
-#ifdef GENL_MAGIC_FAMILY_HDRSZ
-    .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
-#endif
-    .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
-};
+static struct genl_family ZZZ_genl_family;
 
 /*
 * Magic: define multicast groups
@@ -282,13 +274,35 @@ static struct genl_family ZZZ_genl_family __read_mostly = {
 * genetlink: pass family to functions using groups
 * genetlink: only pass array to genl_register_family_with_ops()
 * which are commits c53ed742..2a94fe48
+ *
+ * v4.10, 489111e5 genetlink: statically initialize families
+ * and previous commit drop GENL_ID_GENERATE and register helper functions.
 */
-#ifdef genl_register_family_with_ops_groups
+#if defined(genl_register_family_with_ops_groups) || !defined(GENL_ID_GENERATE)
 #include <linux/genl_magic_func-genl_register_family_with_ops_groups.h>
 #else
 #include <linux/genl_magic_func-genl_register_mc_group.h>
 #endif
 
+static struct genl_family ZZZ_genl_family __read_mostly = {
+    /* .id = GENL_ID_GENERATE, which exists no longer, and was 0 anyways */
+    .name = __stringify(GENL_MAGIC_FAMILY),
+    .version = GENL_MAGIC_VERSION,
+#ifdef GENL_MAGIC_FAMILY_HDRSZ
+    .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
+#endif
+    .maxattr = ARRAY_SIZE(CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
+
+#ifndef GENL_ID_GENERATE
+    .ops = ZZZ_genl_ops,
+    .n_ops = ARRAY_SIZE(ZZZ_genl_ops),
+    .mcgrps = ZZZ_genl_mcgrps,
+    .n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps),
+    .module = THIS_MODULE,
+#endif
+};
+
+
 /*
 * Magic: provide conversion functions {{{1
 * populate skb from struct.
--
1.8.3.1

@@ -1,4 +1,4 @@
 COPY_LIST=" \
            $PKG_BASE/files/* \
-           $STX_BASE/downloads/e1000e-3.4.1.1.tar.gz"
+           $STX_BASE/downloads/e1000e-3.4.2.1.tar.gz"
 TIS_PATCH_VER=1
@@ -8,7 +8,7 @@
 %define kmod_name e1000e
 
 Name: %{kmod_name}-kmod%{?bt_ext}
-Version: 3.4.1.1
+Version: 3.4.2.1
 Release: 0%{?_tis_dist}.%{tis_patch_ver}
 Group: System Environment/Kernel
 License: GPLv2
@@ -78,7 +78,7 @@ class LogMgmtDaemon():
 my_exec = os.path.basename(sys.argv[0])
 
 if not os.path.exists(LOG_DIR):
-    os.mkdir(LOG_DIR, 0755)
+    os.mkdir(LOG_DIR, 0o755)
 
 log_format = '%(asctime)s: ' \
              + my_exec + '[%(process)s]: ' \
@@ -16,4 +16,4 @@ COPY_LIST="$PKG_BASE/src/LICENSE \
            $PKG_BASE/src/example.py \
            $PKG_BASE/src/example.conf"
 
-TIS_PATCH_VER=3
+TIS_PATCH_VER=4
@@ -1,6 +1,7 @@
 [Unit]
 Description=Collectd statistics daemon and extension services
 Documentation=man:collectd(1) man:collectd.conf(5)
+Before=pmon.service
 After=local-fs.target network-online.target
 Requires=local-fs.target network-online.target
 
@@ -4,4 +4,4 @@ COPY_LIST="$PKG_BASE/src/LICENSE \
            $PKG_BASE/src/influxdb.conf.pmon \
            $PKG_BASE/src/influxdb.service"
 
-TIS_PATCH_VER=1
+TIS_PATCH_VER=2
@@ -1,8 +1,10 @@
 [Unit]
 Description=InfluxDB open-source, distributed, time series database
 Documentation=https://influxdb.com/docs/
-After=local-fs.target network.target
 Before=collectd.service
+Before=pmon.service
+After=local-fs.target network-online.target
+Requires=local-fs.target network-online.target
 
 [Service]
 User=influxdb
|
@ -57,7 +57,7 @@ class BuddyInfo(object):
|
|||||||
for line in map(self.parse_line, buddyinfo):
|
for line in map(self.parse_line, buddyinfo):
|
||||||
numa_node = int(line["numa_node"])
|
numa_node = int(line["numa_node"])
|
||||||
zone = line["zone"]
|
zone = line["zone"]
|
||||||
free_fragments = map(int, line["nr_free"].split())
|
free_fragments = [int(nr) for nr in line["nr_free"].split()]
|
||||||
max_order = len(free_fragments)
|
max_order = len(free_fragments)
|
||||||
fragment_sizes = self.get_order_sizes(max_order)
|
fragment_sizes = self.get_order_sizes(max_order)
|
||||||
usage_in_bytes = [block[0] * block[1] for block in zip(free_fragments, fragment_sizes)]
|
usage_in_bytes = [block[0] * block[1] for block in zip(free_fragments, fragment_sizes)]
|
||||||
|
@@ -106,7 +106,7 @@ def collectMemtop(influx_info, node, ci):
 fields["platform_avail"] += avail / MiB
 fields["platform_hfree"] += hfree
 f1.close()
-s = generateString(measurement, tags.keys(), tags.values(), fields.keys(), fields.values())
+s = generateString(measurement, list(tags.keys()), list(tags.values()), list(fields.keys()), list(fields.values()))
 if s is None:
     good_string = False
 else:
@@ -221,7 +221,7 @@ def collectMemstats(influx_info, node, ci, services, syseng_services, openstack_
 fields["total"]["vsz"] += vsz
 break
 # send data to InfluxDB
-for key in fields.keys():
+for key in fields:
     influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", key, "rss", fields[key]["rss"], "vsz", fields[key]["vsz"]) + "\n"
 p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True)
 p.communicate()
@@ -333,7 +333,7 @@ def collectSchedtop(influx_info, node, ci, services, syseng_services, openstack_
 fields[svc] += occ
 fields["total"] += occ
 break
-for key in fields.keys():
+for key in fields:
     influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", key, "occ", fields[key]) + "\n"
 # send data to InfluxDB
 p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True)
@@ -800,7 +800,7 @@ def collectRabbitMq(influx_info, node, ci):
 info[i] = "processes_" + info[i]
 if info[i].replace("_", "").isalpha() and info[i + 1].isdigit():
     fields[info[i]] = info[i + 1]
-s = generateString(measurement, tags.keys(), tags.values(), fields.keys(), fields.values())
+s = generateString(measurement, list(tags.keys()), list(tags.values()), list(fields.keys()), list(fields.values()))
 if s is None:
     rabbitmq_output.kill()
 else:
@@ -993,7 +993,7 @@ def collectFilestats(influx_info, node, ci, services, syseng_services, exclude_l
 p.kill()
 continue
 p.kill()
-for key in fields.keys():
+for key in fields:
     influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", key, "read/write", fields[key]["read/write"], "write", fields[key]["write"], "read", fields[key]["read"]) + "\n"
 # send data to InfluxDB
 p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True)
@@ -1041,7 +1041,7 @@ def collectVswitch(influx_info, node, ci):
 for key in fields:
     fields[key] = line[i].strip("%")
     i += 1
-influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, tags.keys()[0], tags.values()[0], tags.keys()[1], tags.values()[1], fields.keys()[0], fields.values()[0], fields.keys()[1], fields.values()[1], fields.keys()[2], fields.values()[2], fields.keys()[3], fields.values()[3], fields.keys()[4], fields.values()[4], fields.keys()[5], fields.values()[5], fields.keys()[6], fields.values()[6], fields.keys()[7], fields.values()[7], fields.keys()[8], fields.values()[8]) + "\n"
+influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, list(tags.keys())[0], list(tags.values())[0], list(tags.keys())[1], list(tags.values())[1], list(fields.keys())[0], list(fields.values())[0], list(fields.keys())[1], list(fields.values())[1], list(fields.keys())[2], list(fields.values())[2], list(fields.keys())[3], list(fields.values())[3], list(fields.keys())[4], list(fields.values())[4], list(fields.keys())[5], list(fields.values())[5], list(fields.keys())[6], list(fields.values())[6], list(fields.keys())[7], list(fields.values())[7], list(fields.keys())[8], list(fields.values())[8]) + "\n"
 vshell_engine_stats_output.kill()
 vshell_port_stats_output = Popen("vshell port-stats-list", shell=True, stdout=PIPE)
 vshell_port_stats_output.stdout.readline()
@@ -1059,7 +1059,7 @@ def collectVswitch(influx_info, node, ci):
 for key in fields1:
     fields1[key] = line[i].strip("%")
     i += 1
-influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, tags1.keys()[0], tags1.values()[0], tags1.keys()[1], tags1.values()[1], fields1.keys()[0], fields1.values()[0], fields1.keys()[1], fields1.values()[1], fields1.keys()[2], fields1.values()[2], fields1.keys()[3], fields1.values()[3], fields1.keys()[4], fields1.values()[4], fields1.keys()[5], fields1.values()[5], fields1.keys()[6], fields1.values()[6]) + "\n"
+influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, list(tags1.keys())[0], list(tags1.values())[0], list(tags1.keys())[1], list(tags1.values())[1], list(fields1.keys())[0], list(fields1.values())[0], list(fields1.keys())[1], list(fields1.values())[1], list(fields1.keys())[2], list(fields1.values())[2], list(fields1.keys())[3], list(fields1.values())[3], list(fields1.keys())[4], list(fields1.values())[4], list(fields1.keys())[5], list(fields1.values())[5], list(fields1.keys())[6], list(fields1.values())[6]) + "\n"
 vshell_port_stats_output.kill()
 vshell_interface_stats_output = Popen("vshell interface-stats-list", shell=True, stdout=PIPE)
 vshell_interface_stats_output.stdout.readline()
@@ -1078,7 +1078,7 @@ def collectVswitch(influx_info, node, ci):
 for key in fields2:
     fields2[key] = line[i].strip("%")
     i += 1
-influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, tags2.keys()[0], tags2.values()[0], tags2.keys()[1], tags2.values()[1], fields2.keys()[0], fields2.values()[0], fields2.keys()[1], fields2.values()[1], fields2.keys()[2], fields2.values()[2], fields2.keys()[3], fields2.values()[3], fields2.keys()[4], fields2.values()[4], fields2.keys()[5], fields2.values()[5], fields2.keys()[6], fields2.values()[6], fields2.keys()[7], fields2.values()[7], fields2.keys()[8], fields2.values()[8], fields2.keys()[9], fields2.values()[9]) + "\n"
+influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, list(tags2.keys())[0], list(tags2.values())[0], list(tags2.keys())[1], list(tags2.values())[1], list(fields2.keys())[0], list(fields2.values())[0], list(fields2.keys())[1], list(fields2.values())[1], list(fields2.keys())[2], list(fields2.values())[2], list(fields2.keys())[3], list(fields2.values())[3], list(fields2.keys())[4], list(fields2.values())[4], list(fields2.keys())[5], list(fields2.values())[5], list(fields2.keys())[6], list(fields2.values())[6], list(fields2.keys())[7], list(fields2.values())[7], list(fields2.keys())[8], list(fields2.values())[8], list(fields2.keys())[9], list(fields2.values())[9]) + "\n"
 else:
     continue
 vshell_interface_stats_output.kill()
@@ -1135,7 +1135,7 @@ def collectApiStats(influx_info, node, ci, services, db_port, rabbit_port):
         break
     lsof_lines.append(line)
 lsof_result.kill()
-for name, service in services.iteritems():
+for name, service in services.items():
     pid_list = list()
     check_pid = False
     if name == "keystone-public":
@@ -286,7 +286,7 @@ def get_info_and_display(cc, show=None):
 pv_pd_num_ext = 4
 pv_pd_num = 3
 
-for k, v in host_storage_attr.iteritems():
+for k, v in host_storage_attr.items():
     if show['diskview'] or show['all']:
         for disk_o in v['host_disks']:
             device_node = getattr(disk_o, 'device_node', '')
|
@ -287,7 +287,7 @@ def parse_arguments(debug, show):
|
|||||||
S[0:0] = L_opts
|
S[0:0] = L_opts
|
||||||
|
|
||||||
# Enable debug option, but its usage/help is hidden.
|
# Enable debug option, but its usage/help is hidden.
|
||||||
D = debug.keys()
|
D = list(debug.keys())
|
||||||
D.sort()
|
D.sort()
|
||||||
D.insert(0, 'all')
|
D.insert(0, 'all')
|
||||||
|
|
||||||
@ -338,7 +338,7 @@ def parse_arguments(debug, show):
|
|||||||
# Enable all debug flags (except libvirt_xml) if 'all' is specified
|
# Enable all debug flags (except libvirt_xml) if 'all' is specified
|
||||||
x = debug['libvirt_xml']
|
x = debug['libvirt_xml']
|
||||||
if debug['all']:
|
if debug['all']:
|
||||||
{debug.update({e: True}) for e in debug.keys()}
|
{debug.update({e: True}) for e in debug}
|
||||||
debug['libvirt_xml'] = x
|
debug['libvirt_xml'] = x
|
||||||
|
|
||||||
# Flatten show options list
|
# Flatten show options list
|
||||||
@ -370,8 +370,8 @@ def _translate_keys(collection, convert):
|
|||||||
""" For a collection of elements, translate _info field names
|
""" For a collection of elements, translate _info field names
|
||||||
into human-readable names based on a list of conversion tuples.
|
into human-readable names based on a list of conversion tuples.
|
||||||
"""
|
"""
|
||||||
for k, item in collection.iteritems():
|
for k, item in collection.items():
|
||||||
keys = item.__dict__.keys()
|
keys = list(item.__dict__.keys())
|
||||||
for from_key, to_key in convert:
|
for from_key, to_key in convert:
|
||||||
if from_key in keys and to_key not in keys:
|
if from_key in keys and to_key not in keys:
|
||||||
try:
|
try:
|
||||||
@ -394,7 +394,7 @@ def _translate_extended_states(collection):
|
|||||||
'Crashed', # 0x06
|
'Crashed', # 0x06
|
||||||
'Suspended' # 0x07
|
'Suspended' # 0x07
|
||||||
]
|
]
|
||||||
for k, item in collection.iteritems():
|
for k, item in collection.items():
|
||||||
try:
|
try:
|
||||||
setattr(item, 'power_state',
|
setattr(item, 'power_state',
|
||||||
power_states[getattr(item, 'power_state')])
|
power_states[getattr(item, 'power_state')])
|
||||||
@ -492,7 +492,7 @@ def range_to_list(csv_range=None):
|
|||||||
"""
|
"""
|
||||||
if not csv_range:
|
if not csv_range:
|
||||||
return []
|
return []
|
||||||
ranges = [(lambda L: range(L[0], L[-1] + 1))(map(int, r.split('-')))
|
ranges = [(lambda L: range(L[0], L[-1] + 1))([int(x) for x in r.split('-')])
|
||||||
for r in csv_range.split(',')]
|
for r in csv_range.split(',')]
|
||||||
return [y for x in ranges for y in x]
|
return [y for x in ranges for y in x]
|
||||||
|
|
||||||
@@ -624,7 +624,7 @@ def do_libvirt_domain_info((host)):
     up_total += 1
     cpuset_total |= cpuset
 cpulist_f = _mask_to_cpulist(mask=cpuset_total)
-for key in sorted(cpulist_d.iterkeys()):
+for key in sorted(cpulist_d.keys()):
     cpulist_p.append(cpulist_d[key])
 
 # Determine if floating or pinned, display appropriate cpulist
@@ -833,7 +833,7 @@ def define_option_flags(show, options=[],
 if 'all' in options:
     {show.update({e: True}) for e in L_brief + L_details}
 for e in options:
-    if e in show.keys():
+    if e in show:
         show.update({e: True})
 
 
@@ -898,9 +898,9 @@ def print_all_tables(tenants=None,
 for C in ['servers', 'pcpus', 'U:dedicated', 'U:shared',
           'memory', 'U:memory', 'A:mem_4K', 'A:mem_2M', 'A:mem_1G']:
     pt.align[C] = 'r'
-for host_name, H in sorted(hypervisors.iteritems(),
+for host_name, H in sorted(hypervisors.items(),
                            key=lambda (k, v): (natural_keys(k))):
-    A = agg_h[host_name].keys()
+    A = list(agg_h[host_name].keys())
 
     try:
         topology_idx = topologies_idx[host_name]
@@ -914,9 +914,9 @@ def print_all_tables(tenants=None,
 cpu_id = 0
 socket_id = topology_idx[cpu_id]['s']
 core_id = topology_idx[cpu_id]['c']
-n_sockets = len(topology.keys())
-n_cores = len(topology[socket_id].keys())
-n_threads = len(topology[socket_id][core_id].keys())
+n_sockets = len(list(topology.keys()))
+n_cores = len(list(topology[socket_id].keys()))
+n_threads = len(list(topology[socket_id][core_id].keys()))
 else:
     if 'topology' in H.cpu_info:
         topology = H.cpu_info['topology']
@@ -1019,7 +1019,7 @@ def print_all_tables(tenants=None,
 if show['topology']:
     print
     print('LOGICAL CPU TOPOLOGY (compute hosts):')
-    for host_name, topology in sorted(topologies.iteritems(),
+    for host_name, topology in sorted(topologies.items(),
                                       key=lambda (k, v): (natural_keys(k))):
         H = hypervisors[host_name]
         try:
@@ -1038,9 +1038,9 @@ def print_all_tables(tenants=None,
 cpu_id = 0
 socket_id = topology_idx[cpu_id]['s']
 core_id = topology_idx[cpu_id]['c']
-n_sockets = len(topology.keys())
-n_cores = len(topology[socket_id].keys())
-n_threads = len(topology[socket_id][core_id].keys())
+n_sockets = len(list(topology.keys()))
+n_cores = len(list(topology[socket_id].keys()))
+n_threads = len(list(topology[socket_id][core_id].keys()))
 
 print('%s: Model:%s, Arch:%s, Vendor:%s, '
       'Sockets=%d, Cores/Socket=%d, Threads/Core=%d, Logical=%d'
@@ -1083,7 +1083,7 @@ def print_all_tables(tenants=None,
 if show['topology-long']:
     print
     print('LOGICAL CPU TOPOLOGY (compute hosts):')
-    for host_name, topology in sorted(topologies.iteritems(),
+    for host_name, topology in sorted(topologies.items(),
                                       key=lambda (k, v): (natural_keys(k))):
         H = hypervisors[host_name]
         try:
@@ -1102,9 +1102,9 @@ def print_all_tables(tenants=None,
 cpu_id = 0
 socket_id = topology_idx[cpu_id]['s']
 core_id = topology_idx[cpu_id]['c']
-n_sockets = len(topology.keys())
-n_cores = len(topology[socket_id].keys())
-n_threads = len(topology[socket_id][core_id].keys())
+n_sockets = len(list(topology.keys()))
+n_cores = len(list(topology[socket_id].keys()))
+n_threads = len(list(topology[socket_id][core_id].keys()))
 
 print('%s: Model:%s, Arch:%s, Vendor:%s, '
       'Sockets=%d, Cores/Socket=%d, Threads/Core=%d, Logical=%d'
@@ -1160,7 +1160,7 @@ def print_all_tables(tenants=None,
     pt.align[C] = 'r'
 for C in ['in_libvirt']:
     pt.align[C] = 'c'
-for _, S in sorted(servers.iteritems(),
+for _, S in sorted(servers.items(),
                    key=lambda (k, v): (natural_keys(v.host),
                                        v.server_group,
                                        v.instance_name)
@@ -1211,7 +1211,7 @@ def print_all_tables(tenants=None,
     vcpus_scale = flavor_vcpus
 
 in_libvirt = False
-for h, D in domains.iteritems():
+for h, D in domains.items():
     if S.id in D:
         in_libvirt = True
         break
@@ -1256,9 +1256,9 @@ def print_all_tables(tenants=None,
     pt.align[C] = 'r'
 for C in ['in_nova']:
     pt.align[C] = 'c'
-for host, D in sorted(domains.iteritems(),
+for host, D in sorted(domains.items(),
                       key=lambda (k, v): (natural_keys(k))):
-    for _, S in sorted(D.iteritems(),
+    for _, S in sorted(D.items(),
                        key=lambda (k, v): (v['name'])):
         in_nova = True if S['uuid'] in servers else False
         pt.add_row(
@@ -1291,7 +1291,7 @@ def print_all_tables(tenants=None,
     'created_at',
     ])
 pt.align = 'l'
-for _, M in sorted(migrations.iteritems(),
+for _, M in sorted(migrations.items(),
                    key=lambda (k, v): (k)):
     pt.add_row(
         [M.instance_uuid,
@@ -1327,7 +1327,7 @@ def print_all_tables(tenants=None,
 for C in ['id', 'vcpus', 'ram', 'disk', 'ephemeral', 'swap',
           'rxtx_factor']:
     pt.align[C] = 'r'
-for _, F in sorted(flavors.iteritems(),
+for _, F in sorted(flavors.items(),
                    key=lambda (k, v): (k)):
     if F.id in flavors_in_use:
         pt.add_row(
@@ -1361,7 +1361,7 @@ def print_all_tables(tenants=None,
 pt.align = 'l'
 for C in ['id', 'min_disk', 'min_ram', 'status']:
     pt.align[C] = 'r'
-for _, I in sorted(images.iteritems(),
+for _, I in sorted(images.items(),
                    key=lambda (k, v): (k)):
     if I.id in images_in_use:
         pt.add_row(
@@ -1387,7 +1387,7 @@ def print_all_tables(tenants=None,
     'Metadata',
     ])
 pt.align = 'l'
-for _, S in sorted(server_groups.iteritems(),
+for _, S in sorted(server_groups.items(),
                    key=lambda (k, v): (k)):
     if S.id in server_groups_in_use:
         tenant = tenants[S.project_id].name
@@ -1615,7 +1615,7 @@ def get_info_and_display(show=None):
 # translate fields into human-readable names
 _translate_keys(images, convert)
 
-for I_id, I in images.iteritems():
+for I_id, I in images.items():
     meta = copy.deepcopy(I.properties)
     I.properties = {}
     for k, v in meta.items():
@@ -1708,7 +1708,7 @@ def get_info_and_display(show=None):
 
 # Get extra_specs
 extra_specs = {}
-for f_id, F in flavors.iteritems():
+for f_id, F in flavors.items():
     try:
         specs = F.get_keys()
     except Exception as e:
@@ -1794,7 +1794,7 @@ def get_info_and_display(show=None):
 
 # Build up aggregate list per compute host
 agg_h = {}
-for H in hypervisors.keys():
+for H in hypervisors:
     agg_h[H] = {}
 for A in aggregates.values():
     for H in A.hosts:
@@ -1837,7 +1837,7 @@ def get_info_and_display(show=None):
     sys.exit(1)
 
 hosts = []
-for h in hypervisors.keys():
+for h in hypervisors:
     hosts.append(h)
 
 # Launch tasks
@@ -1851,7 +1851,7 @@ def get_info_and_display(show=None):
 # Reap aged workers that exceed hang timeout
 now = time.time()
 reap = []
-for pid in active_pids.keys():
+for pid in active_pids:
     if pid == 0:
         continue
     try:
@@ -1957,7 +1957,7 @@ def get_info_and_display(show=None):
 # We need libvirt topology information to make sense of cpusets.
 have_topology = True
 try:
-    if len(topologies_idx[host].keys()) < 1:
+    if len(list(topologies_idx[host].keys())) < 1:
         have_topology = False
 except:
     have_topology = False
@@ -2042,7 +2042,7 @@ def get_info_and_display(show=None):
 server_mismatch = False
 for S in servers.values():
     in_libvirt = False
-    for h, D in domains.iteritems():
+    for h, D in domains.items():
         if S.id in D and S.host == h:
             in_libvirt = True
             break
@@ -2053,8 +2053,8 @@ def get_info_and_display(show=None):
     % (S.id, S.instance_name, S.name, S.host))
 
 # Detect mismatch where server is in libvirt but not in nova
-for host, D in domains.iteritems():
-    for k, S in D.iteritems():
+for host, D in domains.items():
+    for k, S in D.items():
         in_nova = False
         uuid = S['uuid']
         if uuid in servers and servers[uuid].host == host:
@@ -11,5 +11,5 @@ MOTD_PATH=${MOTD_PATH:-"/etc/motd.d"}
 MOTD_TAG=${MOTD_TAG:-"motd-update"}
 
 if [ -d ${MOTD_PATH} ]; then
-    run-parts --without-progname ${MOTD_PATH} 1>${MOTD_FILE}
+    run-parts ${MOTD_PATH} 1>${MOTD_FILE}
 fi