Allow enabling vencrypt for VNC

This patchset allows enabling vencrypt (TLS-secured VNC) for the libvirt
chart, based on a downstream patchset. [1]

Primary differences:
- the script that generates pod-specific certs has been moved under
  values.conf.vencrypt.cert_init_sh so that it can be overridden if
  necessary
- the creation of a (sub)issuer for vencrypt is left outside the scope
  of this (and the nova) chart; an example issuer is sketched after the
  values changes below
- the issuer used to sign these certs is configurable under
  values.conf.vencrypt.issuer.kind and values.conf.vencrypt.issuer.name
  (see the example override after the reference below)
- added manifests.role_cert_manager to control creation of the roles
  needed to create/update certs

1. https://github.com/vexxhost/atmosphere/pull/483
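
For reference, a minimal values override that enables the feature might look
like the following. This is a sketch, not part of the patch; the ClusterIssuer
named here matches the chart default and must already exist (issuer creation
is out of scope, per above).

# Example override (sketch): enable TLS for VNC and the cert-manager RBAC.
# "ca-clusterissuer" is the chart default and is assumed to already exist.
conf:
  qemu:
    vnc_tls: "1"
  vencrypt:
    issuer:
      kind: ClusterIssuer
      name: ca-clusterissuer
manifests:
  role_cert_manager: true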

Change-Id: I955015874fed2b24570251c4cad01412bbab6045
Author: Mosher, Jaymes (jm616v)
Date: 2023-07-13 09:05:25 -06:00
Parent: bfcc66689e
Commit: f234218dba
7 changed files with 159 additions and 1 deletions


@@ -15,7 +15,7 @@ apiVersion: v1
appVersion: v1.0.0
description: OpenStack-Helm libvirt
name: libvirt
version: 0.1.22
version: 0.1.23
home: https://libvirt.org
sources:
- https://libvirt.org/git/?p=libvirt.git;a=summary


@@ -16,6 +16,14 @@ limitations under the License.
set -ex

# NOTE(mnaser): This will move the VNC certificates into the expected location.
if [ -f /tmp/vnc.crt ]; then
  mkdir -p /etc/pki/libvirt-vnc
  mv /tmp/vnc.key /etc/pki/libvirt-vnc/server-key.pem
  mv /tmp/vnc.crt /etc/pki/libvirt-vnc/server-cert.pem
  mv /tmp/vnc-ca.crt /etc/pki/libvirt-vnc/ca-cert.pem
fi

# TODO: We disable cgroup functionality for cgroup v2, we should fix this in the future
if $(stat -fc %T /sys/fs/cgroup/ | grep -q cgroup2fs); then
  CGROUP_VERSION=v2
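
For context, /etc/pki/libvirt-vnc is the default vnc_tls_x509_cert_dir that
libvirt's qemu driver reads server-cert.pem, server-key.pem and ca-cert.pem
from, so the block above only needs to move the files into place. The values
knobs introduced by this patch that turn the feature on (rendered into
qemu.conf) are, as a sketch:

# Sketch: values rendered into qemu.conf; vnc_tls turns TLS (VeNCrypt) on,
# vnc_tls_x509_verify additionally requires clients to present a certificate.
conf:
  qemu:
    vnc_tls: "1"
    vnc_tls_x509_verify: "1"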


@@ -26,6 +26,10 @@ data:
{{- end }}
  libvirt.sh: |
{{ tuple "bin/_libvirt.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- if eq .Values.conf.qemu.vnc_tls "1" }}
  cert-init.sh: |
{{ tpl .Values.conf.vencrypt.cert_init_sh . | indent 4 }}
{{- end }}
{{- if .Values.conf.ceph.enabled }}
  ceph-keyring.sh: |
{{ tuple "bin/_ceph-keyring.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}


@@ -79,6 +79,43 @@ spec:
      initContainers:
{{ tuple $envAll "pod_dependency" $mounts_libvirt_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
{{ dict "envAll" $envAll | include "helm-toolkit.snippets.kubernetes_apparmor_loader_init_container" | indent 8 }}
{{- if eq .Values.conf.qemu.vnc_tls "1" }}
        - name: cert-init-vnc
{{ tuple $envAll "kubectl" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ dict "envAll" $envAll "application" "libvirt" "container" "cert_init" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          command:
            - /tmp/cert-init.sh
          env:
            - name: TYPE
              value: vnc
            - name: ISSUER_KIND
              value: {{ .Values.conf.vencrypt.issuer.kind }}
            - name: ISSUER_NAME
              value: {{ .Values.conf.vencrypt.issuer.name }}
            - name: POD_UID
              valueFrom:
                fieldRef:
                  fieldPath: metadata.uid
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: libvirt-bin
              mountPath: /tmp/cert-init.sh
              subPath: cert-init.sh
              readOnly: true
{{- end }}
{{- if .Values.conf.ceph.enabled }}
{{- if empty .Values.conf.ceph.cinder.keyring }}
        - name: ceph-admin-keyring-placement
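
Putting the env wiring together: cert-init.sh (included in the values changes
below) uses these downward-API variables to request a per-pod, pod-owned
Certificate. For a hypothetical pod libvirt-abcde with IP 10.0.0.5 in
namespace openstack, the object it applies would look roughly like this; all
concrete values below are made up.

# Illustrative only: the per-pod Certificate that cert-init.sh requests.
# Name, namespace, UID, hostname and IP are example values.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: libvirt-abcde-vnc
  namespace: openstack
  ownerReferences:
    - apiVersion: v1
      kind: Pod
      name: libvirt-abcde
      uid: 11111111-2222-3333-4444-555555555555
spec:
  secretName: libvirt-abcde-vnc
  commonName: 10.0.0.5
  usages:
    - client auth
    - server auth
  dnsNames:
    - node1.example.com
  ipAddresses:
    - 10.0.0.5
  issuerRef:
    kind: ClusterIssuer
    name: ca-clusterissuer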


@@ -0,0 +1,53 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.role_cert_manager }}
{{- $serviceAccountName := "libvirt" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ .Release.Name }}-cert-manager
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ .Release.Name }}-cert-manager
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ .Release.Name }}-cert-manager
  namespace: {{ .Release.Namespace }}
rules:
  - apiGroups:
      - cert-manager.io
    verbs:
      - get
      - list
      - create
    resources:
      - certificates
  - apiGroups:
      - ""
    verbs:
      - get
      - patch
    resources:
      - secrets
{{- end -}}


@@ -31,6 +31,7 @@ images:
    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_focal_17.2.6-1-20230508'
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    image_repo_sync: docker.io/library/docker:17.07.0
    kubectl: docker.io/bitnami/kubectl:latest
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
@@ -115,11 +116,64 @@ conf:
    log_level: "3"
    log_outputs: "1:file:/var/log/libvirt/libvirtd.log"
  qemu:
    vnc_tls: "0"
    vnc_tls_x509_verify: "0"
    stdio_handler: "file"
    user: "nova"
    group: "kvm"
  kubernetes:
    cgroup: "kubepods.slice"
  vencrypt:
    # Issuer to use for the vencrypt certs.
    issuer:
      kind: ClusterIssuer
      name: ca-clusterissuer
    # Script is included here (vs in bin/) to allow overriding, in the case that
    # communication happens over an IP other than the pod IP for some reason.
    cert_init_sh: |
      #!/bin/bash
      set -x

      # Script to create certs for each libvirt pod based on pod IP (by default).
      cat <<EOF | kubectl apply -f -
      apiVersion: cert-manager.io/v1
      kind: Certificate
      metadata:
        name: ${POD_NAME}-${TYPE}
        namespace: ${POD_NAMESPACE}
        ownerReferences:
          - apiVersion: v1
            kind: Pod
            name: ${POD_NAME}
            uid: ${POD_UID}
      spec:
        secretName: ${POD_NAME}-${TYPE}
        commonName: ${POD_IP}
        usages:
          - client auth
          - server auth
        dnsNames:
          - ${HOSTNAME}
        ipAddresses:
          - ${POD_IP}
        issuerRef:
          kind: ${ISSUER_KIND}
          name: ${ISSUER_NAME}
      EOF

      kubectl -n ${POD_NAMESPACE} wait --for=condition=Ready --timeout=300s \
        certificate/${POD_NAME}-${TYPE}

      # NOTE(mnaser): cert-manager does not clean-up the secrets when the certificate
      #               is deleted, so we should add an owner reference to the secret
      #               to ensure that it is cleaned up when the pod is deleted.
      kubectl -n ${POD_NAMESPACE} patch secret ${POD_NAME}-${TYPE} \
        --type=json -p='[{"op": "add", "path": "/metadata/ownerReferences", "value": [{"apiVersion": "v1", "kind": "Pod", "name": "'${POD_NAME}'", "uid": "'${POD_UID}'"}]}]'

      kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\.crt}' | base64 -d > /tmp/${TYPE}.crt
      kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\.key}' | base64 -d > /tmp/${TYPE}.key
      kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.ca\.crt}' | base64 -d > /tmp/${TYPE}-ca.crt

pod:
  probes:
@@ -262,6 +316,7 @@ manifests:
  daemonset_libvirt: true
  job_image_repo_sync: true
  network_policy: false
  role_cert_manager: false
  secret_registry: true
secrets:
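
As called out in the commit message, creating the issuer itself stays outside
this chart. Something along these lines, a sketch using cert-manager's CA
issuer type with a hypothetical "ca-key-pair" Secret name, would need to exist
beforehand and match conf.vencrypt.issuer:

# Example prerequisite (not managed by this chart): a CA ClusterIssuer that
# matches conf.vencrypt.issuer above. "ca-key-pair" is a hypothetical Secret
# in cert-manager's namespace containing the signing CA's tls.crt/tls.key.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: ca-clusterissuer
spec:
  ca:
    secretName: ca-key-pair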


@@ -23,4 +23,5 @@ libvirt:
  - 0.1.20 Update Ceph to 17.2.6
  - 0.1.21 Disable libvirt cgroup functionality for cgroup-v2
  - 0.1.22 Set targeted dependency of libvirt with ovn networking backend
  - 0.1.23 Add support for enabling vencrypt
...