c47f0964d9
Test Plan (on Debian):
- Kubernetes 1.23.1 package builds successfully
- All packages build successfully
- Image builds successfully

Depends-On: https://review.opendev.org/c/starlingx/compile/+/825651
Story: 2009830
Task: 44638

Signed-off-by: Kaustubh Dhokte <kaustubh.dhokte@windriver.com>
Change-Id: I57de1d998412e61bb928a9ce1930bc2a1c600282
From 2f16a9e2c8dc5995aad9a04636ff4f20cb7b51c7 Mon Sep 17 00:00:00 2001
From: Gleb Aronsky <gleb.aronsky@windriver.com>
Date: Tue, 25 Jan 2022 12:35:48 -0500
Subject: [PATCH] kubelet cpumanager infrastructure pods use system reserved
 CPUs

This assigns system infrastructure pods to the "reserved" cpuset
to isolate them from the shared pool of CPUs.

Infrastructure pods include any pods that belong to the kube-system,
armada, cert-manager, vault, platform-deployment-manager, portieris,
notification, or flux-helm namespaces.

The implementation is somewhat simplistic: it is assumed that the
"reserved" cpuset is large enough to handle all infrastructure pods'
CPU allocations.

This also prevents infrastructure pods from using Guaranteed resources.

Co-authored-by: Jim Gauld <james.gauld@windriver.com>
Signed-off-by: Gleb Aronsky <gleb.aronsky@windriver.com>
---
 pkg/kubelet/cm/cpumanager/policy_static.go    | 47 +++++++++++++++++--
 .../cm/cpumanager/policy_static_test.go       | 19 +++++++-
 2 files changed, 61 insertions(+), 5 deletions(-)

diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index 9697f4d4bb0..aeac7fdc8cb 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -53,6 +53,11 @@ func (e SMTAlignmentError) Type() string {
 	return ErrorSMTAlignment
 }
 
+// Define namespaces used by platform infrastructure pods
+var infraNamespaces = [...]string{
+	"kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm",
+}
+
 // staticPolicy is a CPU manager policy that does not change CPU
 // assignments for exclusively pinned guaranteed containers after the main
 // container process starts.
@@ -121,10 +126,11 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
 	klog.InfoS("Static policy created with configuration", "options", opts)
 
 	policy := &staticPolicy{
-		topology:    topology,
-		affinity:    affinity,
-		cpusToReuse: make(map[string]cpuset.CPUSet),
-		options:     opts,
+		topology:        topology,
+		affinity:        affinity,
+		excludeReserved: excludeReserved,
+		cpusToReuse:     make(map[string]cpuset.CPUSet),
+		options:         opts,
 	}
 
 	allCPUs := topology.CPUDetails.CPUs()
@@ -263,6 +269,25 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
 }
 
 func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
+	// Process infra pods before guaranteed pods.
+	if isKubeInfra(pod) {
+		// Container belongs in the reserved pool.
+		// We don't want to fall through to the p.guaranteedCPUs() clause below, so return either nil or an error.
+		if _, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
+			klog.Infof("[cpumanager] static policy: reserved container already present in state, skipping (namespace: %s, pod UID: %s, pod: %s, container: %s)", pod.Namespace, string(pod.UID), pod.Name, container.Name)
+			return nil
+		}
+
+		cpuset := p.reserved
+		if cpuset.IsEmpty() {
+			// If this happens then someone messed up.
+			return fmt.Errorf("[cpumanager] static policy: reserved container unable to allocate cpus (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v, reserved:%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset, p.reserved)
+		}
+		s.SetCPUSet(string(pod.UID), container.Name, cpuset)
+		klog.Infof("[cpumanager] static policy: reserved: AddContainer (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset)
+		return nil
+	}
+
 	if numCPUs := p.guaranteedCPUs(pod, container); numCPUs != 0 {
 		klog.InfoS("Static policy: Allocate", "pod", klog.KObj(pod), "containerName", container.Name)
 		// container belongs in an exclusively allocated pool
@@ -367,6 +392,10 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
 	if cpuQuantity.Value()*1000 != cpuQuantity.MilliValue() {
 		return 0
 	}
+	// Infrastructure pods use reserved CPUs even if they're in the Guaranteed QoS class
+	if isKubeInfra(pod) {
+		return 0
+	}
 	// Safe downcast to do for all systems with < 2.1 billion CPUs.
 	// Per the language spec, `int` is guaranteed to be at least 32 bits wide.
 	// https://golang.org/ref/spec#Numeric_types
@@ -580,3 +609,13 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
 
 	return hints
 }
+
+// isKubeInfra checks whether a given pod is in a platform infrastructure namespace.
+func isKubeInfra(pod *v1.Pod) bool {
+	for _, namespace := range infraNamespaces {
+		if namespace == pod.Namespace {
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 80bd04a1f92..34c5a23c553 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -830,7 +830,8 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
 }
 
 func TestStaticPolicyAddWithResvList(t *testing.T) {
-
+	infraPod := makePod("fakePod", "fakeContainer2", "200m", "200m")
+	infraPod.Namespace = "kube-system"
 	testCases := []staticPolicyTestWithResvList{
 		{
 			description: "GuPodSingleCore, SingleSocketHT, ExpectError",
@@ -872,6 +873,22 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
 			expCPUAlloc:     true,
 			expCSet:         cpuset.NewCPUSet(4, 5),
 		},
+		{
+			description:     "InfraPod, SingleSocketHT, ExpectAllocReserved",
+			topo:            topoSingleSocketHT,
+			numReservedCPUs: 2,
+			reserved:        cpuset.NewCPUSet(0, 1),
+			stAssignments: state.ContainerCPUAssignments{
+				"fakePod": map[string]cpuset.CPUSet{
+					"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
+				},
+			},
+			stDefaultCPUSet: cpuset.NewCPUSet(4, 5),
+			pod:             infraPod,
+			expErr:          nil,
+			expCPUAlloc:     true,
+			expCSet:         cpuset.NewCPUSet(0, 1),
+		},
 	}
 
 	testExcl := true
-- 
2.25.1
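
For context, the sketch below distills the control flow this patch adds into a
minimal, self-contained Go program. It is not part of the patch: the pod type
and the allocate helper are simplified stand-ins invented for illustration
(the real code works with *v1.Pod, state.State, and cpuset.CPUSet); only
infraNamespaces and the branching logic mirror policy_static.go.

// sketch.go - standalone illustration of the namespace-based classification
// and reserved-pool fallback (assumed, simplified types).
package main

import "fmt"

// Namespaces treated as platform infrastructure, as in the patch.
var infraNamespaces = [...]string{
	"kube-system", "armada", "cert-manager", "platform-deployment-manager",
	"portieris", "vault", "notification", "flux-helm",
}

// pod is a simplified stand-in for v1.Pod.
type pod struct {
	name      string
	namespace string
}

// isKubeInfra mirrors the helper added in policy_static.go.
func isKubeInfra(p pod) bool {
	for _, ns := range infraNamespaces {
		if ns == p.namespace {
			return true
		}
	}
	return false
}

// allocate mirrors the short-circuit added to Allocate(): infrastructure
// pods always receive the reserved cpuset; other pods fall through to the
// policy's normal guaranteed/shared handling (elided here).
func allocate(p pod, reserved []int) ([]int, error) {
	if isKubeInfra(p) {
		if len(reserved) == 0 {
			// Mirrors the patch's error path when the reserved cpuset is empty.
			return nil, fmt.Errorf("reserved cpuset is empty; cannot place infra pod %q", p.name)
		}
		return reserved, nil
	}
	return nil, nil // fall through: not an infra pod
}

func main() {
	reserved := []int{0, 1}
	for _, p := range []pod{
		{name: "coredns", namespace: "kube-system"},
		{name: "web", namespace: "default"},
	} {
		cpus, err := allocate(p, reserved)
		fmt.Printf("%s/%s -> cpus=%v err=%v\n", p.namespace, p.name, cpus, err)
	}
}

Run with the reserved cpuset {0, 1}, the kube-system pod lands on the
reserved CPUs while the default-namespace pod falls through to the normal
allocation path, which is the behaviour exercised by the new
"InfraPod, SingleSocketHT, ExpectAllocReserved" test case above. Matching on
namespace rather than QoS class is what lets a Guaranteed infra pod still be
steered to the reserved cpuset; that is also why guaranteedCPUs() returns 0
for infra pods.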