79db1f3eed
Changes for adding Kubernetes 1.23.1 to StarlingX, including build
environment updates.

The package builds successfully. Built and installed an ISO with
K8s 1.23.1 on AIO-SX.

Depends-On: https://review.opendev.org/c/starlingx/compile/+/825651

Story: 2009830
Task: 44424

Change-Id: I3e2b793d7b88057fc597b2445bddd137bb2b4fcf
Signed-off-by: Gleb Aronsky <gleb.aronsky@windriver.com>
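The kubelet patch below is the substance of this change: it disables CFS
quota throttling for Guaranteed pods. The pod-level hunk in
helpers_linux.go applies the override to every Guaranteed pod, while the
container-level hunk in internal_container_lifecycle_linux.go additionally
requires the static CPU manager policy. Condensed into a standalone sketch
(ResourceConfig here is a simplified stand-in for the kubelet's struct, and
applyCFSQuotaOverride is a hypothetical helper, not kubelet API), the rule
the patch enforces amounts to:

package main

import "fmt"

// ResourceConfig is a simplified stand-in for the kubelet's cgroup resource
// config in pkg/kubelet/cm; only the CFS fields the patch touches are shown.
type ResourceConfig struct {
	CpuQuota  int64  // CPU time per period in microseconds; -1 disables the quota
	CpuPeriod uint64 // CFS period in microseconds
}

// applyCFSQuotaOverride is a hypothetical condensation of the patched logic:
// disable the CFS quota and restore the default 100 ms period.
func applyCFSQuotaOverride(cfg *ResourceConfig, cpuPolicy string, guaranteed bool) {
	if cpuPolicy == "static" && guaranteed {
		cfg.CpuQuota = -1      // written to cgroup cpu.cfs_quota_us
		cfg.CpuPeriod = 100000 // written to cgroup cpu.cfs_period_us
	}
}

func main() {
	cfg := ResourceConfig{CpuQuota: 20000, CpuPeriod: 100000}
	applyCFSQuotaOverride(&cfg, "static", true)
	fmt.Printf("quota=%d period=%d\n", cfg.CpuQuota, cfg.CpuPeriod) // quota=-1 period=100000
}

Writing -1 to cpu.cfs_quota_us tells the kernel scheduler not to throttle
the cgroup at all; the 100000 us period is simply the kernel default.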
From cde296d121955a9ee4f148f775d73bb746a17310 Mon Sep 17 00:00:00 2001
From: Gleb Aronsky <gleb.aronsky@windriver.com>
Date: Fri, 21 Jan 2022 17:03:57 -0500
Subject: kubelet cpumanager disable CFS quota throttling for Guaranteed pods

This disables CFS CPU quota to avoid performance degradation due to
the Linux kernel's CFS quota implementation. Note that the 4.18 kernel
attempts to solve the CFS throttling problem, but there are reports
that it is not completely effective.

This disables CFS quota throttling for Guaranteed pods for both
parent and container cgroups by writing -1 to cgroup cpu.cfs_quota_us.
Disabling the quota yields a dramatic improvement in HTTP response
latency.

This patch was refactored for 1.22.5 due to the new
internal_container_lifecycle framework. We leverage the same mechanism
used to set Linux resources as in "cpu manager: specify the container
CPU set during the creation".

Co-authored-by: Jim Gauld <james.gauld@windriver.com>
Signed-off-by: Gleb Aronsky <gleb.aronsky@windriver.com>

---
 pkg/kubelet/cm/cpumanager/cpu_manager.go      |  7 +++
 pkg/kubelet/cm/cpumanager/fake_cpu_manager.go | 12 +++--
 pkg/kubelet/cm/helpers_linux.go               | 12 ++++-
 pkg/kubelet/cm/helpers_linux_test.go          | 45 ++++++++++---------
 .../cm/internal_container_lifecycle_linux.go  | 11 ++++-
 5 files changed, 61 insertions(+), 26 deletions(-)

diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go
index a876d413870..6ad289336ea 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
@@ -72,6 +72,9 @@ type Manager interface {
 	// State returns a read-only interface to the internal CPU manager state.
 	State() state.Reader
 
+	// GetCPUPolicy returns the assigned CPU manager policy
+	GetCPUPolicy() string
+
 	// GetTopologyHints implements the topologymanager.HintProvider Interface
 	// and is consulted to achieve NUMA aware resource alignment among this
 	// and other resource controllers.
@@ -314,6 +317,10 @@ func (m *manager) State() state.Reader {
 	return m.state
 }
 
+func (m *manager) GetCPUPolicy() string {
+	return m.policy.Name()
+}
+
 func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
 	// The pod is during the admission phase. We need to save the pod to avoid it
 	// being cleaned before the admission ended
diff --git a/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go
index 93369705135..8082bbeebcb 100644
--- a/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go
@@ -17,7 +17,7 @@ limitations under the License.
 package cpumanager
 
 import (
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
@@ -28,7 +28,8 @@ import (
 )
 
 type fakeManager struct {
-	state state.State
+	policy Policy
+	state  state.State
 }
 
 func (m *fakeManager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error {
@@ -70,6 +71,10 @@ func (m *fakeManager) State() state.Reader {
 	return m.state
 }
 
+func (m *fakeManager) GetCPUPolicy() string {
+	return m.policy.Name()
+}
+
 func (m *fakeManager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
 	klog.InfoS("GetExclusiveCPUs", "podUID", podUID, "containerName", containerName)
 	return cpuset.CPUSet{}
@@ -88,6 +93,7 @@ func (m *fakeManager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet
 // NewFakeManager creates empty/fake cpu manager
 func NewFakeManager() Manager {
 	return &fakeManager{
-		state: state.NewMemoryState(),
+		policy: &nonePolicy{},
+		state:  state.NewMemoryState(),
 	}
 }
diff --git a/pkg/kubelet/cm/helpers_linux.go b/pkg/kubelet/cm/helpers_linux.go
index 83c80501f5d..6e8f232e4d6 100644
--- a/pkg/kubelet/cm/helpers_linux.go
+++ b/pkg/kubelet/cm/helpers_linux.go
@@ -25,7 +25,7 @@ import (
 
 	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/v1/resource"
@@ -182,6 +182,16 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
 	// build the result
 	result := &ResourceConfig{}
 	if qosClass == v1.PodQOSGuaranteed {
+		// Disable CFS CPU quota to avoid performance degradation due to
+		// Linux kernel CFS throttle implementation.
+		// NOTE: 4.18 kernel attempts to solve CFS throttling problem,
+		// but there are reports that it is not completely effective.
+		// This will configure cgroup CFS parameters at pod level:
+		// /sys/fs/cgroup/cpu/k8s-infra/kubepods/<pod>/cpu.cfs_quota_us
+		// /sys/fs/cgroup/cpu/k8s-infra/kubepods/<pod>/cpu.cfs_period_us
+		cpuQuota = int64(-1)
+		cpuPeriod = uint64(100000)
+
 		result.CpuShares = &cpuShares
 		result.CpuQuota = &cpuQuota
 		result.CpuPeriod = &cpuPeriod
diff --git a/pkg/kubelet/cm/helpers_linux_test.go b/pkg/kubelet/cm/helpers_linux_test.go
index 101b21e682a..91c5782e3b4 100644
--- a/pkg/kubelet/cm/helpers_linux_test.go
+++ b/pkg/kubelet/cm/helpers_linux_test.go
@@ -25,7 +25,7 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -64,8 +64,9 @@ func TestResourceConfigForPod(t *testing.T) {
 	burstablePartialShares := MilliCPUToShares(200)
 	burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
 	guaranteedShares := MilliCPUToShares(100)
-	guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
-	guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
+	guaranteedQuotaPeriod := uint64(100000)
+	guaranteedQuota := int64(-1)
+	guaranteedTunedQuota := int64(-1)
 	memoryQuantity = resource.MustParse("100Mi")
 	cpuNoLimit := int64(-1)
 	guaranteedMemory := memoryQuantity.Value()
@@ -204,8 +205,8 @@ func TestResourceConfigForPod(t *testing.T) {
 				},
 			},
 			enforceCPULimits: true,
-			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+			quotaPeriod:      guaranteedQuotaPeriod,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-no-cpu-enforcement": {
 			pod: &v1.Pod{
@@ -218,8 +219,8 @@ func TestResourceConfigForPod(t *testing.T) {
 				},
 			},
 			enforceCPULimits: false,
-			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+			quotaPeriod:      guaranteedQuotaPeriod,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -232,8 +233,8 @@ func TestResourceConfigForPod(t *testing.T) {
 				},
 			},
 			enforceCPULimits: true,
-			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+			quotaPeriod:      guaranteedQuotaPeriod,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-no-cpu-enforcement-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -246,8 +247,8 @@ func TestResourceConfigForPod(t *testing.T) {
 				},
 			},
 			enforceCPULimits: false,
-			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+			quotaPeriod:      guaranteedQuotaPeriod,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"burstable-partial-limits-with-init-containers": {
 			pod: &v1.Pod{
@@ -309,8 +310,10 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 	burstablePartialShares := MilliCPUToShares(200)
 	burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
 	guaranteedShares := MilliCPUToShares(100)
-	guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
-	guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
+	guaranteedQuotaPeriod := uint64(100000)
+	guaranteedQuota := int64(-1)
+	guaranteedTunedQuota := int64(-1)
+
 	memoryQuantity = resource.MustParse("100Mi")
 	cpuNoLimit := int64(-1)
 	guaranteedMemory := memoryQuantity.Value()
@@ -449,8 +452,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 				},
 			},
 			enforceCPULimits: true,
-			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+			quotaPeriod:      guaranteedQuotaPeriod,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-no-cpu-enforcement": {
 			pod: &v1.Pod{
@@ -463,8 +466,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 				},
 			},
 			enforceCPULimits: false,
-			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+			quotaPeriod:      guaranteedQuotaPeriod,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -477,8 +480,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 				},
 			},
 			enforceCPULimits: true,
-			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+			quotaPeriod:      guaranteedQuotaPeriod,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-no-cpu-enforcement-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -491,8 +494,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 				},
 			},
 			enforceCPULimits: false,
-			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+			quotaPeriod:      guaranteedQuotaPeriod,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 	}
 
diff --git a/pkg/kubelet/cm/internal_container_lifecycle_linux.go b/pkg/kubelet/cm/internal_container_lifecycle_linux.go
index cb7c0cfa543..956696a51e8 100644
--- a/pkg/kubelet/cm/internal_container_lifecycle_linux.go
+++ b/pkg/kubelet/cm/internal_container_lifecycle_linux.go
@@ -23,8 +23,9 @@ import (
 	"strconv"
 	"strings"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
 )
 
 func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error {
@@ -35,6 +36,14 @@ func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, contain
 		}
 	}
 
+	// Disable cgroup CFS throttle at the container level.
+	// /sys/fs/cgroup/cpu/k8s-infra/kubepods/<pod>/<container>/cpu.cfs_quota_us
+	// /sys/fs/cgroup/cpu/k8s-infra/kubepods/<pod>/<container>/cpu.cfs_period_us
+	if i.cpuManager.GetCPUPolicy() == "static" && v1qos.GetPodQOS(pod) == v1.PodQOSGuaranteed {
+		containerConfig.Linux.Resources.CpuPeriod = int64(100000)
+		containerConfig.Linux.Resources.CpuQuota = int64(-1)
+	}
+
 	if i.memoryManager != nil {
 		numaNodes := i.memoryManager.GetMemoryNUMANodes(pod, container)
 		if numaNodes.Len() > 0 {
-- 
2.25.1
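To spot-check the behavior on a deployed node, one can read the pod-level
cgroup values directly. A minimal sketch follows; the cgroup path is an
assumption based on the k8s-infra hierarchy named in the patch comments,
and the <pod-uid> placeholder must be substituted with a real pod cgroup
directory:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Assumed cgroup v1 path from the patch comments; replace <pod-uid> with
	// the pod's actual cgroup directory on the node under test.
	base := "/sys/fs/cgroup/cpu/k8s-infra/kubepods/<pod-uid>"
	for _, name := range []string{"cpu.cfs_quota_us", "cpu.cfs_period_us"} {
		data, err := os.ReadFile(base + "/" + name)
		if err != nil {
			fmt.Fprintln(os.Stderr, "read failed:", err)
			os.Exit(1)
		}
		// With this patch, a Guaranteed pod should show quota -1 and period 100000.
		fmt.Printf("%s = %s\n", name, strings.TrimSpace(string(data)))
	}
}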