From 07b94538c0fd054220cb166b094683db13a5ac4f Mon Sep 17 00:00:00 2001
From: "Manoj Alva(ma257n)"
Date: Fri, 2 Jul 2021 06:28:03 +0000
Subject: [PATCH] Support for configurable HAProxy LoadBalancer

- Moved HAProxy control plane/worker templates to ConfigMaps

Relates-To: #19
Closes: #19
Change-Id: I9d2a3992827fa3a589c930e40ab3ab9d34527731
---
 config/manager/kustomization.yaml             |   7 +
 .../manager/loadbalancer/kustomization.yaml   |  12 ++
 .../loadbalancer/loadBalancerControlPlane.cfg |  48 +++++++
 .../loadbalancer/loadBalancerWorker.cfg       |  40 ++++++
 .../samples/airship_v1beta1_sipcluster.yaml   |   2 +-
 pkg/services/loadbalancer.go                  | 122 ++++--------------
 pkg/services/services_test.go                 |  19 +++
 testutil/testutil.go                          |  19 ++-
 8 files changed, 173 insertions(+), 96 deletions(-)
 create mode 100644 config/manager/loadbalancer/kustomization.yaml
 create mode 100644 config/manager/loadbalancer/loadBalancerControlPlane.cfg
 create mode 100644 config/manager/loadbalancer/loadBalancerWorker.cfg

diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
index 5c5f0b8..22698d6 100644
--- a/config/manager/kustomization.yaml
+++ b/config/manager/kustomization.yaml
@@ -1,2 +1,9 @@
 resources:
 - manager.yaml
+- loadbalancer
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+images:
+- name: controller
+  newName: quay.io/airshipit/sip
+  newTag: latest
diff --git a/config/manager/loadbalancer/kustomization.yaml b/config/manager/loadbalancer/kustomization.yaml
new file mode 100644
index 0000000..7de04c1
--- /dev/null
+++ b/config/manager/loadbalancer/kustomization.yaml
@@ -0,0 +1,12 @@
+configMapGenerator:
+  - name: loadbalancercontrolplane
+    files:
+      - loadBalancerControlPlane.cfg
+  - name: loadbalancerworker
+    files:
+      - loadBalancerWorker.cfg
+
+generatorOptions:
+  disableNameSuffixHash: true
+
+namespace: sipcluster-system
diff --git a/config/manager/loadbalancer/loadBalancerControlPlane.cfg b/config/manager/loadbalancer/loadBalancerControlPlane.cfg
new file mode 100644
index 0000000..36a3595
--- /dev/null
+++ b/config/manager/loadbalancer/loadBalancerControlPlane.cfg
@@ -0,0 +1,48 @@
+global
+    log stdout format raw local0 notice
+    daemon
+
+defaults
+    mode http
+    log global
+    option httplog
+    option dontlognull
+    retries 1
+    # Configures the timeout for a connection request to be left pending in a queue
+    # (connection requests are queued once the maximum number of connections is reached).
+    timeout queue 30s
+    # Configures the timeout for a connection to a backend server to be established.
+    timeout connect 30s
+    # Configures the timeout for inactivity during periods when we would expect
+    # the client to be speaking. For usability of kubectl exec, the timeout should
+    # be long enough to cover inactivity due to idleness of interactive sessions.
+    timeout client 600s
+    # Configures the timeout for inactivity during periods when we would expect
+    # the server to be speaking. For usability of kubectl log -f, the timeout should
+    # be long enough to cover inactivity due to the lack of new logs.
+    timeout server 600s
+
+#---------------------------------------------------------------------
+{{- $servers := .Servers }}
+{{- range .ContainerPorts }}
+{{- $containerPort := . }}
+frontend {{ $containerPort.Name }}-frontend
+    bind *:{{ $containerPort.ContainerPort }}
+    mode tcp
+    option tcplog
+    default_backend {{ $containerPort.Name }}-backend
+backend {{ $containerPort.Name }}-backend
+    mode tcp
+    balance roundrobin
+    option httpchk GET /readyz
+    http-check expect status 200
+    option log-health-checks
+    # Observed apiserver returns 500 for around 10s when 2nd cp node joins.
+    # downinter 2s makes it check more frequently to recover from that state sooner.
+    # Also changing fall to 4 so that it takes longer (4 failures) for it to take down a backend.
+    default-server check check-ssl verify none inter 5s downinter 2s fall 4 on-marked-down shutdown-sessions
+{{- range $servers }}
+{{- $server := . }}
+    server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
+{{ end -}}
+{{ end -}}
diff --git a/config/manager/loadbalancer/loadBalancerWorker.cfg b/config/manager/loadbalancer/loadBalancerWorker.cfg
new file mode 100644
index 0000000..ac828e9
--- /dev/null
+++ b/config/manager/loadbalancer/loadBalancerWorker.cfg
@@ -0,0 +1,40 @@
+global
+log stdout format raw local0 notice
+daemon
+
+defaults
+mode tcp
+log global
+option tcplog
+option dontlognull
+retries 1
+# Configures the timeout for a connection request to be left pending in a queue
+# (connection requests are queued once the maximum number of connections is reached).
+timeout queue 30s
+# Configures the timeout for a connection to a backend server to be established.
+timeout connect 30s
+# Configures the timeout for inactivity during periods when we would expect
+# the client to be speaking.
+timeout client 600s
+# Configures the timeout for inactivity during periods when we would expect
+# the server to be speaking.
+timeout server 600s
+
+#---------------------------------------------------------------------
+{{- $servers := .Servers }}
+{{- range .ContainerPorts }}
+{{- $containerPort := . }}
+frontend {{ $containerPort.Name }}-frontend
+    bind *:{{ $containerPort.ContainerPort }}
+    default_backend {{ $containerPort.Name }}-backend
+backend {{ $containerPort.Name }}-backend
+    balance roundrobin
+    option tcp-check
+    tcp-check connect
+    option log-health-checks
+default-server check
+{{- range $servers }}
+{{- $server := . }}
+    server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
+{{ end -}}
+{{ end -}}
diff --git a/config/samples/airship_v1beta1_sipcluster.yaml b/config/samples/airship_v1beta1_sipcluster.yaml
index b095fc0..fa7a088 100644
--- a/config/samples/airship_v1beta1_sipcluster.yaml
+++ b/config/samples/airship_v1beta1_sipcluster.yaml
@@ -58,4 +58,4 @@ spec:
         end: 30011
       nodeInterfaceId: oam-ipv4
   # NOTE: clusterIP has not yet been implemented.
-  # clusterIP: 1.2.3.4 # IP of the base cluster VIP
\ No newline at end of file
+  # clusterIP: 1.2.3.4 # IP of the base cluster VIP
diff --git a/pkg/services/loadbalancer.go b/pkg/services/loadbalancer.go
index a30955c..46d24c8 100644
--- a/pkg/services/loadbalancer.go
+++ b/pkg/services/loadbalancer.go
@@ -16,6 +16,7 @@ package services
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"strings"
@@ -236,7 +237,7 @@ func newLBControlPlane(name, namespace string,
 	logger logr.Logger,
 	config airshipv1.LoadBalancerServiceControlPlane,
 	machines *bmh.MachineList,
-	client client.Client) loadBalancerControlPlane {
+	mgrClient client.Client) loadBalancerControlPlane {
 	servicePorts := []corev1.ServicePort{
 		{
 			Name: "http",
@@ -244,6 +245,17 @@ func newLBControlPlane(name, namespace string,
 			NodePort: int32(config.NodePort),
 		},
 	}
+	// Get the control plane template string from the ConfigMap
+	templateControlPlane := ""
+	cm := &corev1.ConfigMap{}
+	err := mgrClient.Get(context.Background(), client.ObjectKey{
+		Name:      "loadbalancercontrolplane",
+		Namespace: namespace}, cm)
+	if err != nil {
+		logger.Error(err, "unable to retrieve template from config map")
+	}
+	templateControlPlane = cm.Data["loadBalancerControlPlane.cfg"]
+
 	return loadBalancerControlPlane{loadBalancer{
 		sipName: types.NamespacedName{
 			Name: name,
@@ -252,7 +264,7 @@ func newLBControlPlane(name, namespace string,
 		logger: logger,
 		config: config.SIPClusterService,
 		machines: machines,
-		client: client,
+		client: mgrClient,
 		bmhRole: airshipv1.RoleControlPlane,
 		template: templateControlPlane,
 		servicePorts: servicePorts,
@@ -265,7 +277,7 @@ func newLBWorker(name, namespace string,
 	logger logr.Logger,
 	config airshipv1.LoadBalancerServiceWorker,
 	machines *bmh.MachineList,
-	client client.Client) loadBalancerWorker {
+	mgrClient client.Client) loadBalancerWorker {
 	servicePorts := []corev1.ServicePort{}
 	for port := config.NodePortRange.Start; port <= config.NodePortRange.End; port++ {
 		servicePorts = append(servicePorts, corev1.ServicePort{
@@ -274,6 +286,18 @@ func newLBWorker(name, namespace string,
 			NodePort: int32(port),
 		})
 	}
+
+	// Get the worker template string from the ConfigMap
+	templateWorker := ""
+	cm := &corev1.ConfigMap{}
+	err := mgrClient.Get(context.Background(), client.ObjectKey{
+		Name:      "loadbalancerworker",
+		Namespace: namespace}, cm)
+	if err != nil {
+		logger.Error(err, "unable to retrieve template from config map")
+	}
+	templateWorker = cm.Data["loadBalancerWorker.cfg"]
+
 	return loadBalancerWorker{loadBalancer{
 		sipName: types.NamespacedName{
 			Name: name,
@@ -282,7 +306,7 @@ func newLBWorker(name, namespace string,
 		logger: logger,
 		config: config.SIPClusterService,
 		machines: machines,
-		client: client,
+		client: mgrClient,
 		bmhRole: airshipv1.RoleWorker,
 		template: templateWorker,
 		servicePorts: servicePorts,
@@ -310,93 +334,3 @@ func (lb loadBalancer) generateTemplate(p proxy) ([]byte, error) {
 	rendered := w.Bytes()
 	return rendered, nil
 }
-
-var templateControlPlane = `global
-    log stdout format raw local0 notice
-    daemon
-
-defaults
-    mode http
-    log global
-    option httplog
-    option dontlognull
-    retries 1
-    # Configures the timeout for a connection request to be left pending in a queue
-    # (connection requests are queued once the maximum number of connections is reached).
-    timeout queue 30s
-    # Configures the timeout for a connection to a backend server to be established.
-    timeout connect 30s
-    # Configures the timeout for inactivity during periods when we would expect
-    # the client to be speaking. For usability of 'kubectl exec', the timeout should
-    # be long enough to cover inactivity due to idleness of interactive sessions.
-    timeout client 600s
-    # Configures the timeout for inactivity during periods when we would expect
-    # the server to be speaking. For usability of 'kubectl log -f', the timeout should
-    # be long enough to cover inactivity due to the lack of new logs.
-    timeout server 600s
-
-#---------------------------------------------------------------------
-{{- $servers := .Servers }}
-{{- range .ContainerPorts }}
-{{- $containerPort := . }}
-frontend {{ $containerPort.Name }}-frontend
-    bind *:{{ $containerPort.ContainerPort }}
-    mode tcp
-    option tcplog
-    default_backend {{ $containerPort.Name }}-backend
-backend {{ $containerPort.Name }}-backend
-    mode tcp
-    balance roundrobin
-    option httpchk GET /readyz
-    http-check expect status 200
-    option log-health-checks
-    # Observed apiserver returns 500 for around 10s when 2nd cp node joins.
-    # downinter 2s makes it check more frequently to recover from that state sooner.
-    # Also changing fall to 4 so that it takes longer (4 failures) for it to take down a backend.
-    default-server check check-ssl verify none inter 5s downinter 2s fall 4 on-marked-down shutdown-sessions
-{{- range $servers }}
-{{- $server := . }}
-    server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
-{{ end -}}
-{{ end -}}`
-
-var templateWorker = `global
-log stdout format raw local0 notice
-daemon
-
-defaults
-mode tcp
-log global
-option tcplog
-option dontlognull
-retries 1
-# Configures the timeout for a connection request to be left pending in a queue
-# (connection requests are queued once the maximum number of connections is reached).
-timeout queue 30s
-# Configures the timeout for a connection to a backend server to be established.
-timeout connect 30s
-# Configures the timeout for inactivity during periods when we would expect
-# the client to be speaking.
-timeout client 600s
-# Configures the timeout for inactivity during periods when we would expect
-# the server to be speaking.
-timeout server 600s
-
-#---------------------------------------------------------------------
-{{- $servers := .Servers }}
-{{- range .ContainerPorts }}
-{{- $containerPort := . }}
-frontend {{ $containerPort.Name }}-frontend
-    bind *:{{ $containerPort.ContainerPort }}
-    default_backend {{ $containerPort.Name }}-backend
-backend {{ $containerPort.Name }}-backend
-    balance roundrobin
-    option tcp-check
-    tcp-check connect
-    option log-health-checks
-default-server check
-{{- range $servers }}
-{{- $server := . }}
-    server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
-{{ end -}}
-{{ end -}}`
diff --git a/pkg/services/services_test.go b/pkg/services/services_test.go
index 9a9e2a1..83e2c63 100644
--- a/pkg/services/services_test.go
+++ b/pkg/services/services_test.go
@@ -3,6 +3,7 @@ package services_test
 import (
 	"context"
 	"encoding/json"
+	"io/ioutil"
 	"strings"

 	airshipv1 "sipcluster/pkg/api/v1"
@@ -82,6 +83,23 @@ var _ = Describe("Service Set", func() {
 				bmh2.GetName(): m2,
 			},
 		}
+
+		// ConfigMaps holding the HAProxy templates
+		TemplateControlPlane, err := ioutil.ReadFile("../../config/manager/loadbalancer/loadBalancerControlPlane.cfg")
+		if err == nil {
+			lbcontrolplaneTemplateConfigMap := testutil.CreateTemplateConfigMap("loadbalancercontrolplane",
+				"loadBalancerControlPlane.cfg", "default", string(TemplateControlPlane))
+			Expect(k8sClient.Create(context.Background(), lbcontrolplaneTemplateConfigMap)).Should(Succeed())
+		}
+
+		TemplateWorker, err := ioutil.ReadFile("../../config/manager/loadbalancer/loadBalancerWorker.cfg")
+
+		if err == nil {
+			lbworkerTemplateConfigMap := testutil.CreateTemplateConfigMap("loadbalancerworker",
+				"loadBalancerWorker.cfg", "default", string(TemplateWorker))
+			Expect(k8sClient.Create(context.Background(), lbworkerTemplateConfigMap)).Should(Succeed())
+		}
+
 	})

 	AfterEach(func() {
@@ -89,6 +107,7 @@
 		Expect(k8sClient.DeleteAllOf(context.Background(), &metal3.BareMetalHost{}, opts...)).Should(Succeed())
 		Expect(k8sClient.DeleteAllOf(context.Background(), &airshipv1.SIPCluster{}, opts...)).Should(Succeed())
 		Expect(k8sClient.DeleteAllOf(context.Background(), &corev1.Secret{}, opts...)).Should(Succeed())
+		Expect(k8sClient.DeleteAllOf(context.Background(), &corev1.ConfigMap{}, opts...)).Should(Succeed())
 	})

 	Context("When new SIP cluster is created", func() {
diff --git a/testutil/testutil.go b/testutil/testutil.go
index 841502b..bcc1fe5 100644
--- a/testutil/testutil.go
+++ b/testutil/testutil.go
@@ -380,7 +380,6 @@ func CreateSIPCluster(name string, namespace string, controlPlanes int, workers
 				SIPClusterService: airshipv1.SIPClusterService{
 					NodeInterface: "oam-ipv4",
 				},
-				NodePort: 30001,
 			},
 		},
 		LoadBalancerWorker: []airshipv1.LoadBalancerServiceWorker{
@@ -429,6 +428,24 @@ func CreateBMCAuthSecret(nodeName string, namespace string, username string, pas
 	}
 }

+// CreateTemplateConfigMap creates a K8s ConfigMap with a template for HAProxy configuration
+func CreateTemplateConfigMap(cmname string, templatename string, namespace string,
+	templatedata string) *corev1.ConfigMap {
+	return &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: corev1.SchemeGroupVersion.String(),
+			Kind:       "ConfigMap",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      cmname,
+			Namespace: namespace,
+		},
+		Data: map[string]string{
+			templatename: templatedata,
+		},
+	}
+}
+
 func CompareLabels(expected labels.Selector, actual map[string]string) error {
 	if !expected.Matches(labels.Set(actual)) {
 		return fmt.Errorf("labels do not match expected selector %v. Has labels %v", expected, actual)