Update node-feature-discovery helm chart to current version

The existing helm chart for "node feature discovery" is out of
date and no longer runs properly.  Updating it to the current
version (v0.8.1) fixes this.

The upstream code now has a helm chart, so I've aligned with it
to simplify porting in the future.

I made one change from the upstream code: I set the chart version to
0.8.1 to match the git tag and chart appVersion.  This ensures that the
chart tarball is versioned appropriately.
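
For context, here is why that matters for packaging: "helm package"
names the tarball after the "version" field in Chart.yaml, so pinning
it to 0.8.1 keeps the artifact aligned with the git tag.  A quick
sketch (the chart directory name is illustrative):

    helm package node-feature-discovery/
    # -> node-feature-discovery-0.8.1.tgz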

At some point we could modify the build system for this chart to
use a "git repo + patches" approach rather than duplicating the
chart as I've done here.  That is out of scope for this commit,
however.

Closes-Bug: 1927289
Signed-off-by: Chris Friesen <chris.friesen@windriver.com>
Change-Id: I8d9b6830a3852ae269b70c0e9d42ea8abf4e9458
Chris Friesen 2021-05-05 18:19:27 -06:00
parent 7e5965685d
commit 573c5acfe8
14 changed files with 583 additions and 169 deletions


@@ -1,15 +1,15 @@
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-apiVersion: v1
-description: Node Feature Discovery
-home: https://github.com/kubernetes-sigs/node-feature-discovery
-maintainers:
-- name: node-feature-discovery Authors
+apiVersion: v2
+appVersion: v0.8.1
+description: |
+  Detects hardware features available on each node in a Kubernetes cluster, and advertises
+  those features using node labels.
 name: node-feature-discovery
 sources:
 - https://github.com/kubernetes-sigs/node-feature-discovery
-version: 0.3.0
+home: https://github.com/kubernetes-sigs/node-feature-discovery
+keywords:
+  - feature-discovery
+  - feature-detection
+  - node-labels
+type: application
+version: 0.8.1
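
Worth noting: "apiVersion: v2" above marks this as a Helm 3 chart.  A
quick sanity check that the updated chart still parses and renders
(the chart path is illustrative):

    helm lint ./node-feature-discovery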


@@ -1,17 +1,21 @@
 kubernetes-sigs/node-feature-discovery
 ======================================

-This chart runs v0.3.0 of the node-feature-discovery as implemented
-at https://github.com/kubernetes-sigs/node-feature-discovery
+This chart is taken from the "deployment" directory of version
+v0.8.1 of the node-feature-discovery package as implemented at
+https://github.com/kubernetes-sigs/node-feature-discovery
+
+I made one change from the upstream code: I set the chart version to
+0.8.1 to match the git tag and chart appVersion.  This ensures that the
+chart tarball is versioned appropriately.

 This software enables node feature discovery for Kubernetes.  It detects
 hardware features available on each node in a Kubernetes cluster, and
 advertises those features using node labels.

-This chart uses a DaemonSet to spawn a pod on each node in the cluster
-to do the actual work.
+A DaemonSet spawns a pod on each node in the cluster to scan the host,
+and feeds its information to a single "manager" pod running on a
+controller node.

-The two files under the templates directory are taken directly from
-v0.3.0 at the link above.  The Docker image specified is the one
-published by the upstream team.
+The Docker image specified is the one published by the upstream team.


@@ -0,0 +1,63 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "node-feature-discovery.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "node-feature-discovery.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "node-feature-discovery.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "node-feature-discovery.labels" -}}
helm.sh/chart: {{ include "node-feature-discovery.chart" . }}
{{ include "node-feature-discovery.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Selector labels
*/}}
{{- define "node-feature-discovery.selectorLabels" -}}
app.kubernetes.io/name: {{ include "node-feature-discovery.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "node-feature-discovery.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "node-feature-discovery.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
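
To see what these helpers expand to, the chart can be rendered
locally; "nfd" here is an arbitrary release name used only for
illustration:

    helm template nfd ./node-feature-discovery | grep 'app.kubernetes.io/'
    # Expected, per the definitions above:
    #   app.kubernetes.io/name: node-feature-discovery
    #   app.kubernetes.io/instance: nfd
    #   app.kubernetes.io/version: "v0.8.1"
    #   app.kubernetes.io/managed-by: Helm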


@@ -0,0 +1,21 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "node-feature-discovery.fullname" . }}
  labels:
    {{- include "node-feature-discovery.labels" . | nindent 4 }}
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  # when using command line flag --resource-labels to create extended resources
  # you will need to uncomment "- nodes/status"
  # - nodes/status
  verbs:
  - get
  - patch
  - update
  - list
{{- end }}


@@ -0,0 +1,16 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "node-feature-discovery.fullname" . }}
  labels:
    {{- include "node-feature-discovery.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "node-feature-discovery.fullname" . }}
subjects:
- kind: ServiceAccount
  name: {{ include "node-feature-discovery.serviceAccountName" . }}
  namespace: {{ $.Release.Namespace }}
{{- end }}


@@ -0,0 +1,86 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "node-feature-discovery.fullname" . }}-master
  labels:
    {{- include "node-feature-discovery.labels" . | nindent 4 }}
    role: master
spec:
  replicas: {{ .Values.master.replicaCount }}
  selector:
    matchLabels:
      {{- include "node-feature-discovery.selectorLabels" . | nindent 6 }}
      role: master
  template:
    metadata:
      labels:
        {{- include "node-feature-discovery.selectorLabels" . | nindent 8 }}
        role: master
      annotations:
        {{- toYaml .Values.master.annotations | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "node-feature-discovery.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.master.podSecurityContext | nindent 8 }}
      containers:
      - name: master
        securityContext:
          {{- toYaml .Values.master.securityContext | nindent 12 }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
        - containerPort: 8080
          name: grpc
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        command:
        - "nfd-master"
        resources:
          {{- toYaml .Values.master.resources | nindent 12 }}
        args:
          {{- if .Values.master.instance | empty | not }}
          - "--instance={{ .Values.master.instance }}"
          {{- end }}
## Enable TLS authentication
## The example below assumes having the root certificate named ca.crt stored in
## a ConfigMap named nfd-ca-cert, and, the TLS authentication credentials stored
## in a TLS Secret named nfd-master-cert.
## Additional hardening can be enabled by specifying --verify-node-name in
## args, in which case every nfd-worker requires an individual node-specific
## TLS certificate.
#          - "--ca-file=/etc/kubernetes/node-feature-discovery/trust/ca.crt"
#          - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key"
#          - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt"
#        volumeMounts:
#        - name: nfd-ca-cert
#          mountPath: "/etc/kubernetes/node-feature-discovery/trust"
#          readOnly: true
#        - name: nfd-master-cert
#          mountPath: "/etc/kubernetes/node-feature-discovery/certs"
#          readOnly: true
#      volumes:
#      - name: nfd-ca-cert
#        configMap:
#          name: nfd-ca-cert
#      - name: nfd-master-cert
#        secret:
#          secretName: nfd-master-cert
      {{- with .Values.master.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.master.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.master.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
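
As an illustrative install, the master Deployment can be pinned to a
controller node through the scheduling knobs exposed in values.yaml
(the override file name and release name are hypothetical):

    cat > master-overrides.yaml <<'EOF'
    master:
      nodeSelector:
        node-role.kubernetes.io/master: ""
    EOF
    helm upgrade --install nfd ./node-feature-discovery -f master-overrides.yaml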


@@ -1,14 +0,0 @@
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{ if ne .Values.namespace "default" }}
apiVersion: v1
kind: Namespace
metadata:
  name: {{ .Values.namespace }}
{{ end }}


@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.worker.configmapName }}
  labels:
    {{- include "node-feature-discovery.labels" . | nindent 4 }}
data:
  nfd-worker.conf: |-
    {{- .Values.worker.config | nindent 4 }}
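
The worker pods mount this ConfigMap at
/etc/kubernetes/node-feature-discovery, so whatever is in
.Values.worker.config becomes nfd-worker.conf on disk.  A minimal
override that restricts discovery to the cpu and kernel sources (keys
taken from the commented defaults in values.yaml below; file and
release names are hypothetical):

    cat > worker-overrides.yaml <<'EOF'
    worker:
      config: |
        core:
          sources: ["cpu", "kernel"]
    EOF
    helm upgrade --install nfd ./node-feature-discovery -f worker-overrides.yaml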


@@ -1,73 +0,0 @@
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{
  "apiVersion": "apps/v1",
  "kind": "DaemonSet",
  "metadata": {
    "labels": {
      "app": {{ .Values.app_label }}
    },
    "namespace": {{ .Values.namespace }},
    "name": {{ .Release.Name }}
  },
  "spec": {
    "selector": {
      "matchLabels": {
        "app": {{ .Values.app_label }}
      }
    },
    "template": {
      "metadata": {
        "labels": {
          "app": {{ .Values.app_label }}
        }
      },
      "spec": {
        {{ if and .Values.node_selector_key .Values.node_selector_value }}
        "nodeSelector": {
          {{ .Values.node_selector_key }}: {{ .Values.node_selector_value }}
        },
        {{ end }}
        "hostNetwork": true,
        "serviceAccount": {{ .Release.Name }},
        "containers": [
          {
            "env": [
              {
                "name": "NODE_NAME",
                "valueFrom": {
                  "fieldRef": {
                    "fieldPath": "spec.nodeName"
                  }
                }
              }
            ],
            "image": "quay.io/kubernetes_incubator/node-feature-discovery:v0.3.0",
            "name": {{ .Release.Name }},
            "args": ["--sleep-interval={{ .Values.scan_interval }}s"],
            "volumeMounts": [
              {
                "name": "host-sys",
                "mountPath": "/host-sys"
              }
            ]
          }
        ],
        "volumes": [
          {
            "name": "host-sys",
            "hostPath": {
              "path": "/sys"
            }
          }
        ]
      }
    }
  }
}


@@ -1,41 +0,0 @@
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .Release.Name }}
  namespace: {{ .Values.namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ .Release.Name }}
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  verbs:
  - get
  - patch
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ .Release.Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
  name: {{ .Release.Name }}
  namespace: {{ .Values.namespace }}


@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "node-feature-discovery.fullname" . }}-master
  labels:
    {{- include "node-feature-discovery.labels" . | nindent 4 }}
    role: master
spec:
  type: {{ .Values.master.service.type }}
  ports:
  - port: {{ .Values.master.service.port }}
    targetPort: grpc
    protocol: TCP
    name: grpc
  selector:
    {{- include "node-feature-discovery.selectorLabels" . | nindent 4 }}


@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "node-feature-discovery.serviceAccountName" . }}
  labels:
    {{- include "node-feature-discovery.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end -}}


@@ -0,0 +1,119 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ include "node-feature-discovery.fullname" . }}-worker
  labels:
    {{- include "node-feature-discovery.labels" . | nindent 4 }}
    role: worker
spec:
  selector:
    matchLabels:
      {{- include "node-feature-discovery.selectorLabels" . | nindent 6 }}
      role: worker
  template:
    metadata:
      labels:
        {{- include "node-feature-discovery.selectorLabels" . | nindent 8 }}
        role: worker
      annotations:
        {{- toYaml .Values.worker.annotations | nindent 8 }}
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.worker.podSecurityContext | nindent 8 }}
      containers:
      - name: worker
        securityContext:
          {{- toYaml .Values.worker.securityContext | nindent 12 }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        resources:
          {{- toYaml .Values.worker.resources | nindent 12 }}
        command:
        - "nfd-worker"
        args:
        - "--sleep-interval=60s"
        - "--server={{ include "node-feature-discovery.fullname" . }}-master:{{ .Values.master.service.port }}"
## Enable TLS authentication (1/3)
## The example below assumes having the root certificate named ca.crt stored in
## a ConfigMap named nfd-ca-cert, and, the TLS authentication credentials stored
## in a TLS Secret named nfd-worker-cert
#        - "--ca-file=/etc/kubernetes/node-feature-discovery/trust/ca.crt"
#        - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key"
#        - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt"
        volumeMounts:
        - name: host-boot
          mountPath: "/host-boot"
          readOnly: true
        - name: host-os-release
          mountPath: "/host-etc/os-release"
          readOnly: true
        - name: host-sys
          mountPath: "/host-sys"
          readOnly: true
        - name: source-d
          mountPath: "/etc/kubernetes/node-feature-discovery/source.d/"
          readOnly: true
        - name: features-d
          mountPath: "/etc/kubernetes/node-feature-discovery/features.d/"
          readOnly: true
        - name: nfd-worker-conf
          mountPath: "/etc/kubernetes/node-feature-discovery"
          readOnly: true
## Enable TLS authentication (2/3)
#        - name: nfd-ca-cert
#          mountPath: "/etc/kubernetes/node-feature-discovery/trust"
#          readOnly: true
#        - name: nfd-worker-cert
#          mountPath: "/etc/kubernetes/node-feature-discovery/certs"
#          readOnly: true
      volumes:
      - name: host-boot
        hostPath:
          path: "/boot"
      - name: host-os-release
        hostPath:
          path: "/etc/os-release"
      - name: host-sys
        hostPath:
          path: "/sys"
      - name: source-d
        hostPath:
          path: "/etc/kubernetes/node-feature-discovery/source.d/"
      - name: features-d
        hostPath:
          path: "/etc/kubernetes/node-feature-discovery/features.d/"
      - name: nfd-worker-conf
        configMap:
          name: {{ .Values.worker.configmapName }}
          items:
          - key: nfd-worker.conf
            path: nfd-worker.conf
## Enable TLS authentication (3/3)
#      - name: nfd-ca-cert
#        configMap:
#          name: nfd-ca-cert
#      - name: nfd-worker-cert
#        secret:
#          secretName: nfd-worker-cert
      {{- with .Values.worker.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.worker.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.worker.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
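
Once the master and workers are running, discovered features show up
as node labels under the feature.node.kubernetes.io/ prefix, which
gives a quick way to confirm the chart actually works:

    kubectl get nodes -o json \
      | jq '.items[].metadata.labels' \
      | grep 'feature.node.kubernetes.io'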


@@ -1,27 +1,223 @@
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
+image:
+  repository: k8s.gcr.io/nfd/node-feature-discovery
+  # This should be set to 'IfNotPresent' for released version
+  pullPolicy: IfNotPresent
+  # tag, if defined will use the given image tag, else Chart.AppVersion will be used
+  # tag
+imagePullSecrets: []

-# Default values for node-feature-discovery.
-# This is a YAML-formatted file.
-# Declare name/value pairs to be passed into your templates.
-# name: value
+serviceAccount:
+  # Specifies whether a service account should be created
+  create: true
+  # Annotations to add to the service account
+  annotations: {}
+  # The name of the service account to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name:

-# namespace to use for chart resources. Must be specified.
-namespace: default
+nameOverride: ""
+fullnameOverride: ""

-# label for the daemonset to find its pods
-app_label: node-feature-discovery
+master:
+  instance:
+  replicaCount: 1

-# docker image to use for the pods
-image: quay.io/kubernetes_incubator/node-feature-discovery:v0.3.0
+  podSecurityContext: {}
+    # fsGroup: 2000

-# interval (in secs) to scan the node features
-scan_interval: 60
+  securityContext:
+    allowPrivilegeEscalation: false
+    capabilities:
+      drop: [ "ALL" ]
+    readOnlyRootFilesystem: true
+    runAsNonRoot: true
+    # runAsUser: 1000

-# key/value pair to match against node labels to select which nodes
-# should run the node feature discovery. Defaults to all nodes.
-node_selector_key:
-node_selector_value:
+  service:
+    type: ClusterIP
+    port: 8080
+
+  resources: {}
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    # limits:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 128Mi
+
+  nodeSelector: {}
+
+  tolerations:
+  - key: "node-role.kubernetes.io/master"
+    operator: "Equal"
+    value: ""
+    effect: "NoSchedule"
+
+  annotations: {}
+
+  affinity:
+    nodeAffinity:
+      preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: 1
+        preference:
+          matchExpressions:
+          - key: "node-role.kubernetes.io/master"
+            operator: In
+            values: [""]
+
+worker:
+  configmapName: nfd-worker-conf
+  config: |### <NFD-WORKER-CONF-START-DO-NOT-REMOVE>
+    #core:
+    #  labelWhiteList:
+    #  noPublish: false
+    #  sleepInterval: 60s
+    #  sources: [all]
+    #  klog:
+    #    addDirHeader: false
+    #    alsologtostderr: false
+    #    logBacktraceAt:
+    #    logtostderr: true
+    #    skipHeaders: false
+    #    stderrthreshold: 2
+    #    v: 0
+    #    vmodule:
+    ##   NOTE: the following options are not dynamically run-time configurable
+    ##         and require a nfd-worker restart to take effect after being changed
+    #    logDir:
+    #    logFile:
+    #    logFileMaxSize: 1800
+    #    skipLogHeaders: false
+    #sources:
+    #  cpu:
+    #    cpuid:
+    ##     NOTE: whitelist has priority over blacklist
+    #      attributeBlacklist:
+    #        - "BMI1"
+    #        - "BMI2"
+    #        - "CLMUL"
+    #        - "CMOV"
+    #        - "CX16"
+    #        - "ERMS"
+    #        - "F16C"
+    #        - "HTT"
+    #        - "LZCNT"
+    #        - "MMX"
+    #        - "MMXEXT"
+    #        - "NX"
+    #        - "POPCNT"
+    #        - "RDRAND"
+    #        - "RDSEED"
+    #        - "RDTSCP"
+    #        - "SGX"
+    #        - "SSE"
+    #        - "SSE2"
+    #        - "SSE3"
+    #        - "SSE4.1"
+    #        - "SSE4.2"
+    #        - "SSSE3"
+    #      attributeWhitelist:
+    #  kernel:
+    #    kconfigFile: "/path/to/kconfig"
+    #    configOpts:
+    #      - "NO_HZ"
+    #      - "X86"
+    #      - "DMI"
+    #  pci:
+    #    deviceClassWhitelist:
+    #      - "0200"
+    #      - "03"
+    #      - "12"
+    #    deviceLabelFields:
+    #      - "class"
+    #      - "vendor"
+    #      - "device"
+    #      - "subsystem_vendor"
+    #      - "subsystem_device"
+    #  usb:
+    #    deviceClassWhitelist:
+    #      - "0e"
+    #      - "ef"
+    #      - "fe"
+    #      - "ff"
+    #    deviceLabelFields:
+    #      - "class"
+    #      - "vendor"
+    #      - "device"
+    #  custom:
+    #    - name: "my.kernel.feature"
+    #      matchOn:
+    #        - loadedKMod: ["example_kmod1", "example_kmod2"]
+    #    - name: "my.pci.feature"
+    #      matchOn:
+    #        - pciId:
+    #            class: ["0200"]
+    #            vendor: ["15b3"]
+    #            device: ["1014", "1017"]
+    #        - pciId :
+    #            vendor: ["8086"]
+    #            device: ["1000", "1100"]
+    #    - name: "my.usb.feature"
+    #      matchOn:
+    #        - usbId:
+    #            class: ["ff"]
+    #            vendor: ["03e7"]
+    #            device: ["2485"]
+    #        - usbId:
+    #            class: ["fe"]
+    #            vendor: ["1a6e"]
+    #            device: ["089a"]
+    #    - name: "my.combined.feature"
+    #      matchOn:
+    #        - pciId:
+    #            vendor: ["15b3"]
+    #            device: ["1014", "1017"]
+    #          loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
+    #    - name: "feature.by.nodename"
+    #      value: customValue
+    #      matchOn:
+    #        - nodename: ["worker-0", "my-.*-node"]
+    ### <NFD-WORKER-CONF-END-DO-NOT-REMOVE>
+
+  podSecurityContext: {}
+    # fsGroup: 2000
+
+  securityContext:
+    allowPrivilegeEscalation: false
+    capabilities:
+      drop: [ "ALL" ]
+    readOnlyRootFilesystem: true
+    runAsNonRoot: true
+    # runAsUser: 1000
+
+  resources: {}
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    # limits:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 128Mi
+
+  nodeSelector: {}
+
+  tolerations: []
+
+  annotations: {}
+
+## RBAC parameters
+## https://kubernetes.io/docs/reference/access-authn-authz/rbac/
+##
+rbac:
+  create: true
+  serviceAccountName:
+  ## Annotations for the Service Account
+  ##
+  serviceAccountAnnotations: {}
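
As a closing usage example, one of the commented "custom" rules above
can be enabled through an override file (names are hypothetical); this
labels any node that has example_kmod1 loaded:

    cat > custom-rule.yaml <<'EOF'
    worker:
      config: |
        sources:
          custom:
            - name: "my.kernel.feature"
              matchOn:
                - loadedKMod: ["example_kmod1"]
    EOF
    helm upgrade --install nfd ./node-feature-discovery -f custom-rule.yaml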