Merge "Support for configurable HAProxy LoadBalancer"

This commit is contained in:
Zuul 2021-07-19 14:36:38 +00:00 committed by Gerrit Code Review
commit 3a371a939d
8 changed files with 173 additions and 96 deletions

View File

@ -1,2 +1,9 @@
# Kustomization for the SIP controller manager.
# Adds the loadbalancer directory so the HAProxy template ConfigMaps
# are generated alongside the manager deployment, and pins the
# controller image to the published SIP image.
resources:
- manager.yaml
- loadbalancer
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: quay.io/airshipit/sip
newTag: latest

View File

@ -0,0 +1,12 @@
# Generates the two ConfigMaps the SIP controller reads its HAProxy
# configuration templates from (looked up by these exact names).
configMapGenerator:
- name: loadbalancercontrolplane
files:
- loadBalancerControlPlane.cfg
- name: loadbalancerworker
files:
- loadBalancerWorker.cfg
generatorOptions:
# Keep the names stable (no content-hash suffix) so the controller
# can fetch them by the fixed names above.
disableNameSuffixHash: true
namespace: sipcluster-system

View File

@ -0,0 +1,48 @@
# HAProxy configuration template for the control-plane load balancer.
# Rendered with Go text/template; .Servers is the list of control-plane
# nodes and .ContainerPorts the ports to expose (e.g. the apiserver port).
global
log stdout format raw local0 notice
daemon
defaults
mode http
log global
option httplog
option dontlognull
retries 1
# Configures the timeout for a connection request to be left pending in a queue
# (connection requests are queued once the maximum number of connections is reached).
timeout queue 30s
# Configures the timeout for a connection to a backend server to be established.
timeout connect 30s
# Configures the timeout for inactivity during periods when we would expect
# the client to be speaking. For usability of 'kubectl exec', the timeout should
# be long enough to cover inactivity due to idleness of interactive sessions.
timeout client 600s
# Configures the timeout for inactivity during periods when we would expect
# the server to be speaking. For usability of 'kubectl log -f', the timeout should
# be long enough to cover inactivity due to the lack of new logs.
timeout server 600s
#---------------------------------------------------------------------
# One frontend/backend pair is emitted per container port; each backend
# lists every control-plane server at that port.
{{- $servers := .Servers }}
{{- range .ContainerPorts }}
{{- $containerPort := . }}
frontend {{ $containerPort.Name }}-frontend
bind *:{{ $containerPort.ContainerPort }}
mode tcp
option tcplog
default_backend {{ $containerPort.Name }}-backend
backend {{ $containerPort.Name }}-backend
mode tcp
balance roundrobin
option httpchk GET /readyz
http-check expect status 200
option log-health-checks
# Observed apiserver returns 500 for around 10s when 2nd cp node joins.
# downinter 2s makes it check more frequently to recover from that state sooner.
# Also changing fall to 4 so that it takes longer (4 failures) for it to take down a backend.
default-server check check-ssl verify none inter 5s downinter 2s fall 4 on-marked-down shutdown-sessions
{{- range $servers }}
{{- $server := . }}
server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
{{ end -}}
{{ end -}}

View File

@ -0,0 +1,40 @@
# HAProxy configuration template for the worker load balancer.
# Rendered with Go text/template; .Servers is the list of worker nodes
# and .ContainerPorts the NodePort range to expose. Plain TCP passthrough
# with a simple tcp-check health probe (no HTTP readiness endpoint here).
global
log stdout format raw local0 notice
daemon
defaults
mode tcp
log global
option tcplog
option dontlognull
retries 1
# Configures the timeout for a connection request to be left pending in a queue
# (connection requests are queued once the maximum number of connections is reached).
timeout queue 30s
# Configures the timeout for a connection to a backend server to be established.
timeout connect 30s
# Configures the timeout for inactivity during periods when we would expect
# the client to be speaking.
timeout client 600s
# Configures the timeout for inactivity during periods when we would expect
# the server to be speaking.
timeout server 600s
#---------------------------------------------------------------------
# One frontend/backend pair is emitted per container port; each backend
# lists every worker server at that port.
{{- $servers := .Servers }}
{{- range .ContainerPorts }}
{{- $containerPort := . }}
frontend {{ $containerPort.Name }}-frontend
bind *:{{ $containerPort.ContainerPort }}
default_backend {{ $containerPort.Name }}-backend
backend {{ $containerPort.Name }}-backend
balance roundrobin
option tcp-check
tcp-check connect
option log-health-checks
default-server check
{{- range $servers }}
{{- $server := . }}
server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
{{ end -}}
{{ end -}}

View File

@ -16,6 +16,7 @@ package services
import (
"bytes"
"context"
"fmt"
"strings"
@ -236,7 +237,7 @@ func newLBControlPlane(name, namespace string,
logger logr.Logger,
config airshipv1.LoadBalancerServiceControlPlane,
machines *bmh.MachineList,
client client.Client) loadBalancerControlPlane {
mgrClient client.Client) loadBalancerControlPlane {
servicePorts := []corev1.ServicePort{
{
Name: "http",
@ -244,6 +245,17 @@ func newLBControlPlane(name, namespace string,
NodePort: int32(config.NodePort),
},
}
// Get the template string from the "loadbalancercontrolplane" ConfigMap
templateControlPlane := ""
cm := &corev1.ConfigMap{}
err := mgrClient.Get(context.Background(), client.ObjectKey{
Name: "loadbalancercontrolplane",
Namespace: namespace}, cm)
if err != nil {
logger.Error(err, "unable to retrieve control plane template from ConfigMap")
}
templateControlPlane = cm.Data["loadBalancerControlPlane.cfg"]
return loadBalancerControlPlane{loadBalancer{
sipName: types.NamespacedName{
Name: name,
@ -252,7 +264,7 @@ func newLBControlPlane(name, namespace string,
logger: logger,
config: config.SIPClusterService,
machines: machines,
client: client,
client: mgrClient,
bmhRole: airshipv1.RoleControlPlane,
template: templateControlPlane,
servicePorts: servicePorts,
@ -265,7 +277,7 @@ func newLBWorker(name, namespace string,
logger logr.Logger,
config airshipv1.LoadBalancerServiceWorker,
machines *bmh.MachineList,
client client.Client) loadBalancerWorker {
mgrClient client.Client) loadBalancerWorker {
servicePorts := []corev1.ServicePort{}
for port := config.NodePortRange.Start; port <= config.NodePortRange.End; port++ {
servicePorts = append(servicePorts, corev1.ServicePort{
@ -274,6 +286,18 @@ func newLBWorker(name, namespace string,
NodePort: int32(port),
})
}
// Get the template string from the "loadbalancerworker" ConfigMap
templateWorker := ""
cm := &corev1.ConfigMap{}
err := mgrClient.Get(context.Background(), client.ObjectKey{
Name: "loadbalancerworker",
Namespace: namespace}, cm)
if err != nil {
logger.Error(err, "unable to retrieve worker template from ConfigMap")
}
templateWorker = cm.Data["loadBalancerWorker.cfg"]
return loadBalancerWorker{loadBalancer{
sipName: types.NamespacedName{
Name: name,
@ -282,7 +306,7 @@ func newLBWorker(name, namespace string,
logger: logger,
config: config.SIPClusterService,
machines: machines,
client: client,
client: mgrClient,
bmhRole: airshipv1.RoleWorker,
template: templateWorker,
servicePorts: servicePorts,
@ -310,93 +334,3 @@ func (lb loadBalancer) generateTemplate(p proxy) ([]byte, error) {
rendered := w.Bytes()
return rendered, nil
}
var templateControlPlane = `global
log stdout format raw local0 notice
daemon
defaults
mode http
log global
option httplog
option dontlognull
retries 1
# Configures the timeout for a connection request to be left pending in a queue
# (connection requests are queued once the maximum number of connections is reached).
timeout queue 30s
# Configures the timeout for a connection to a backend server to be established.
timeout connect 30s
# Configures the timeout for inactivity during periods when we would expect
# the client to be speaking. For usability of 'kubectl exec', the timeout should
# be long enough to cover inactivity due to idleness of interactive sessions.
timeout client 600s
# Configures the timeout for inactivity during periods when we would expect
# the server to be speaking. For usability of 'kubectl log -f', the timeout should
# be long enough to cover inactivity due to the lack of new logs.
timeout server 600s
#---------------------------------------------------------------------
{{- $servers := .Servers }}
{{- range .ContainerPorts }}
{{- $containerPort := . }}
frontend {{ $containerPort.Name }}-frontend
bind *:{{ $containerPort.ContainerPort }}
mode tcp
option tcplog
default_backend {{ $containerPort.Name }}-backend
backend {{ $containerPort.Name }}-backend
mode tcp
balance roundrobin
option httpchk GET /readyz
http-check expect status 200
option log-health-checks
# Observed apiserver returns 500 for around 10s when 2nd cp node joins.
# downinter 2s makes it check more frequently to recover from that state sooner.
# Also changing fall to 4 so that it takes longer (4 failures) for it to take down a backend.
default-server check check-ssl verify none inter 5s downinter 2s fall 4 on-marked-down shutdown-sessions
{{- range $servers }}
{{- $server := . }}
server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
{{ end -}}
{{ end -}}`
var templateWorker = `global
log stdout format raw local0 notice
daemon
defaults
mode tcp
log global
option tcplog
option dontlognull
retries 1
# Configures the timeout for a connection request to be left pending in a queue
# (connection requests are queued once the maximum number of connections is reached).
timeout queue 30s
# Configures the timeout for a connection to a backend server to be established.
timeout connect 30s
# Configures the timeout for inactivity during periods when we would expect
# the client to be speaking.
timeout client 600s
# Configures the timeout for inactivity during periods when we would expect
# the server to be speaking.
timeout server 600s
#---------------------------------------------------------------------
{{- $servers := .Servers }}
{{- range .ContainerPorts }}
{{- $containerPort := . }}
frontend {{ $containerPort.Name }}-frontend
bind *:{{ $containerPort.ContainerPort }}
default_backend {{ $containerPort.Name }}-backend
backend {{ $containerPort.Name }}-backend
balance roundrobin
option tcp-check
tcp-check connect
option log-health-checks
default-server check
{{- range $servers }}
{{- $server := . }}
server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
{{ end -}}
{{ end -}}`

View File

@ -3,6 +3,7 @@ package services_test
import (
"context"
"encoding/json"
"io/ioutil"
"strings"
airshipv1 "sipcluster/pkg/api/v1"
@ -82,6 +83,23 @@ var _ = Describe("Service Set", func() {
bmh2.GetName(): m2,
},
}
// ConfigMaps holding the HAProxy configuration templates
TemplateControlPlane, err := ioutil.ReadFile("../../config/manager/loadbalancer/loadBalancerControlPlane.cfg")
if err == nil {
lbcontrolplaneTemplateConfigMap := testutil.CreateTemplateConfigMap("loadbalancercontrolplane",
"loadBalancerControlPlane.cfg", "default", string(TemplateControlPlane))
Expect(k8sClient.Create(context.Background(), lbcontrolplaneTemplateConfigMap)).Should(Succeed())
}
TemplateWorker, err := ioutil.ReadFile("../../config/manager/loadbalancer/loadBalancerWorker.cfg")
if err == nil {
lbworkerTemplateConfigMap := testutil.CreateTemplateConfigMap("loadbalancerworker",
"loadBalancerWorker.cfg", "default", string(TemplateWorker))
Expect(k8sClient.Create(context.Background(), lbworkerTemplateConfigMap)).Should(Succeed())
}
})
AfterEach(func() {
@ -89,6 +107,7 @@ var _ = Describe("Service Set", func() {
Expect(k8sClient.DeleteAllOf(context.Background(), &metal3.BareMetalHost{}, opts...)).Should(Succeed())
Expect(k8sClient.DeleteAllOf(context.Background(), &airshipv1.SIPCluster{}, opts...)).Should(Succeed())
Expect(k8sClient.DeleteAllOf(context.Background(), &corev1.Secret{}, opts...)).Should(Succeed())
Expect(k8sClient.DeleteAllOf(context.Background(), &corev1.ConfigMap{}, opts...)).Should(Succeed())
})
Context("When new SIP cluster is created", func() {

View File

@ -380,7 +380,6 @@ func CreateSIPCluster(name string, namespace string, controlPlanes int, workers
SIPClusterService: airshipv1.SIPClusterService{
NodeInterface: "oam-ipv4",
},
NodePort: 30001,
},
},
LoadBalancerWorker: []airshipv1.LoadBalancerServiceWorker{
@ -429,6 +428,24 @@ func CreateBMCAuthSecret(nodeName string, namespace string, username string, pas
}
}
// CreateTemplateConfigMap creates a K8s ConfigMap carrying an HAProxy
// configuration template. The template contents are stored under the
// given file-name key so consumers can look them up by file name.
//
// cmname       - name of the ConfigMap object.
// templatename - data key, i.e. the template file name
//                (e.g. "loadBalancerWorker.cfg").
// namespace    - namespace the ConfigMap is created in.
// templatedata - the template contents.
func CreateTemplateConfigMap(cmname string, templatename string, namespace string,
	templatedata string) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			APIVersion: corev1.SchemeGroupVersion.String(),
			Kind:       "ConfigMap",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      cmname,
			Namespace: namespace,
		},
		Data: map[string]string{
			templatename: templatedata,
		},
	}
}
func CompareLabels(expected labels.Selector, actual map[string]string) error {
if !expected.Matches(labels.Set(actual)) {
return fmt.Errorf("labels do not match expected selector %v. Has labels %v", expected, actual)