Apply fixes according to the linters

This commit is contained in:
Ian Howell 2020-12-07 13:55:35 -06:00
parent 0c82079fd3
commit d0de55a6c3
22 changed files with 213 additions and 199 deletions

View File

@ -23,13 +23,13 @@ While ViNO is responsible for setting up VM infrastructure, such as:
- networking
- bmh objects, with labels:
* location - i.e. `rack: 8` and `node: rdm8r008c002` - should follow k8s semi-standard
* vm role - i.e. `node-type: worker`
* vm flavor - i.e `node-flavor: foobar`
* networks - i.e. `networks: [foo, bar]`
and the details for ViNO can be found [here](https://hackmd.io/KSu8p4QeTc2kXIjlrso2eA)
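For illustration, a `BareMetalHost` carrying the labels described above might look like the following sketch (the label keys and values are taken from the examples in this list; the exact key names or prefixes ViNO uses are not pinned down here, and `networks` is flattened into a plain string because Kubernetes label values cannot be lists):

```yaml
apiVersion: metal3.io/v1alpha1
kind: BareMetalHost
metadata:
  name: rdm8r008c002
  labels:
    # location, following the k8s semi-standard mentioned above
    rack: "8"
    node: rdm8r008c002
    # vm role and flavor
    node-type: worker
    node-flavor: foobar
    # networks attached to the VM, flattened from [foo, bar]
    networks: foo-bar
```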
The Cluster Support Infrastructure Provider, or SIP, is responsible for the lifecycle of:
- identifying the correct `BareMetalHost` resources to label (or unlabel) based on scheduling constraints.
- extracting IP address information from `BareMetalHost` objects to use in the creation of supporting infrastructure.
- creating support infra for the tenant k8s cluster:
* load balancers (for tenant k8s api)
@ -49,9 +49,9 @@ Pseudo Algorithm at a high level after reading the `SIPCluster` CR:
#### Identify BMH VM's
- Gather BMH's that meet the criteria expected for the groups
- Check for existing labeled BMH's
- Complete the expected scheduling constraints:
- If master
- collect into list of bmh's to label
- If worker
- collect into list of bmh's to label
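A minimal Go sketch of this selection loop, to make the pseudo-algorithm concrete (this is not the operator's code; the `host` and `request` types and the rack-based anti-affinity below are illustrative stand-ins for the real BMH labels and `SIPCluster` fields):

```go
package main

import "fmt"

type host struct {
	Name string
	Rack string
}

type request struct {
	Role  string // "master" or "worker"
	Count int
}

// pick collects up to req.Count hosts, never using the same rack twice,
// as a simplified stand-in for the rack anti-affinity spread topology.
func pick(req request, free []host) ([]host, error) {
	usedRacks := map[string]bool{}
	var out []host
	for _, h := range free {
		if usedRacks[h.Rack] {
			continue // this constraint slot is already taken
		}
		usedRacks[h.Rack] = true
		out = append(out, h)
		if len(out) == req.Count {
			return out, nil
		}
	}
	return out, fmt.Errorf("unable to fully schedule %s nodes: got %d of %d",
		req.Role, len(out), req.Count)
}

func main() {
	free := []host{{"node01", "r1"}, {"node02", "r1"}, {"node03", "r2"}}
	picked, err := pick(request{Role: "master", Count: 2}, free)
	fmt.Println(picked, err)
}
```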

View File

@ -1,6 +1,6 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More documentation can be found at https://docs.cert-manager.io
# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for
# breaking changes
apiVersion: cert-manager.io/v1alpha2
kind: Issuer

View File

@ -1,4 +1,4 @@
# This configuration is for teaching kustomize how to update name ref and var substitution
nameReference:
- kind: Issuer
group: cert-manager.io

View File

@ -58,7 +58,7 @@ spec:
type: object
optional:
properties:
clusterIp:
clusterIP:
type: string
sshkey:
type: string
@ -91,13 +91,13 @@ spec:
identify the required BMH's to allow CAPI to build a cluster
type: string
vm-flavor:
description: VmFlavor is essentially a Flavor label identifying
description: VMFlavor is essentially a Flavor label identifying
the type of Node that meets the construction requirements
type: string
type: object
description: Nodes are the list of Nodes objects workers, or master
that define expectations of the Tenant cluster VmRole is either
Control or Workers VmRole VmRoles `json:"vm-role,omitempty"`
that define expectations of the Tenant cluster VMRole is either
Control or Workers VMRole VMRoles `json:"vm-role,omitempty"`
type: object
required:
- infra
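For reference, the `nodes` portion of a `SIPCluster` spec matching this schema might look like the sketch below (a fragment only; the flavor labels, role keys, and counts mirror the test fixtures later in this change, while the `spreadTopology` value and the `standby` key are assumptions about names not shown in this hunk):

```yaml
spec:
  config:
    cluster-name: subcluster-1
  nodes:
    master:
      vm-flavor: airshipit.org/vino-flavor=master
      spreadTopology: per-rack   # assumed value; the API package defines the allowed topologies
      count:
        active: 1
        standby: 0
    worker:
      vm-flavor: airshipit.org/vino-flavor=worker
      spreadTopology: per-rack   # assumed value
      count:
        active: 3
        standby: 0
```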

View File

@ -16,12 +16,12 @@ bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
patchesStrategicMerge:
@ -30,7 +30,7 @@ patchesStrategicMerge:
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml

View File

@ -1,4 +1,4 @@
# This patch injects a sidecar container which is an HTTP proxy for the
# controller manager. It performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment

View File

@ -1,4 +1,3 @@
#
# Adds namespace to all resources.
namespace: sip-system

View File

@ -24,7 +24,7 @@ spec:
infra:
loadbalancer:
optional:
clusterIp: 1.2.3.4 #<-- this aligns to the VIP IP for undercloud k8s
clusterIP: 1.2.3.4 #<-- this aligns to the VIP IP for undercloud k8s
image: haproxy:foo
nodeLabels:
- airship-masters

View File

@ -41,6 +41,7 @@ var (
setupLog = ctrl.Log.WithName("setup")
)
//nolint:errcheck
func init() {
_ = clientgoscheme.AddToScheme(scheme)
@ -52,7 +53,6 @@ func init() {
// Add Kubernetes Core??
_ = corev1.AddToScheme(scheme)
}
func main() {

View File

@ -31,9 +31,9 @@ type SIPClusterSpec struct {
Config *SipConfig `json:"config,omitempty"`
// Nodes are the list of Nodes objects workers, or master that define expectations
// of the Tenant cluster
// VmRole is either Control or Workers
// VmRole VmRoles `json:"vm-role,omitempty"`
Nodes map[VmRoles]NodeSet `json:"nodes,omitempty"`
// VMRole is either Control or Workers
// VMRole VMRoles `json:"vm-role,omitempty"`
Nodes map[VMRoles]NodeSet `json:"nodes,omitempty"`
// Infra is the collection of expected configuration details
// for the multiple infrastructure services or pods that SIP manages
@ -49,7 +49,7 @@ type SipConfig struct {
ClusterName string `json:"cluster-name,omitempty"`
}
// VmRoles defines the states the provisioner will report
// VMRoles defines the states the provisioner will report
// the tenant as having.
type InfraService string
@ -77,16 +77,16 @@ const (
//
type NodeSet struct {
// VmFlavor is essentially a Flavor label identifying the
// VMFlavor is essentially a Flavor label identifying the
// type of Node that meets the construction requirements
VmFlavor string `json:"vm-flavor,omitempty"`
VMFlavor string `json:"vm-flavor,omitempty"`
// PlaceHolder until we define the real expected
// Implementation
// Scheduling defines constraints that allow the SIP Scheduler
// to identify the required BMH's to allow CAPI to build a cluster
Scheduling SpreadTopology `json:"spreadTopology,omitempty"`
// Count defines the scale expectations for the Nodes
Count *VmCount `json:"count,omitempty"`
Count *VMCount `json:"count,omitempty"`
}
type SpreadTopology string
@ -109,25 +109,22 @@ type InfraConfig struct {
}
type OptsConfig struct {
SshKey string `json:"sshkey,omitempty"`
ClusterIp string `json:"clusterIp,omitempty"`
SSHKey string `json:"sshkey,omitempty"`
ClusterIP string `json:"clusterIP,omitempty"`
}
// VmRoles defines the states the provisioner will report
// VMRoles defines the states the provisioner will report
// the tenant as having.
type VmRoles string
type VMRoles string
// Possible Node or VM Roles for a Tenant
const (
// VmMaster means the state is unknown
VmMaster VmRoles = "master"
// VmWorker means the state is unknown
VmWorker VmRoles = "worker"
VMMaster VMRoles = "master"
VMWorker = "worker"
)
// VmCount
type VmCount struct {
// VMCount
type VMCount struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
Active int `json:"active,omitempty"`

View File

@ -61,7 +61,7 @@ func (in *NodeSet) DeepCopyInto(out *NodeSet) {
*out = *in
if in.Count != nil {
in, out := &in.Count, &out.Count
*out = new(VmCount)
*out = new(VMCount)
**out = **in
}
}
@ -160,7 +160,7 @@ func (in *SIPClusterSpec) DeepCopyInto(out *SIPClusterSpec) {
}
if in.Nodes != nil {
in, out := &in.Nodes, &out.Nodes
*out = make(map[VmRoles]NodeSet, len(*in))
*out = make(map[VMRoles]NodeSet, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
@ -215,16 +215,16 @@ func (in *SipConfig) DeepCopy() *SipConfig {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VmCount) DeepCopyInto(out *VmCount) {
func (in *VMCount) DeepCopyInto(out *VMCount) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VmCount.
func (in *VmCount) DeepCopy() *VmCount {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMCount.
func (in *VMCount) DeepCopy() *VMCount {
if in == nil {
return nil
}
out := new(VmCount)
out := new(VMCount)
in.DeepCopyInto(out)
return out
}

View File

@ -65,9 +65,9 @@ func (r *SIPClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error)
// This only works if I add a finalizer to CRD TODO
if sip.ObjectMeta.DeletionTimestamp.IsZero() {
// machines
err, machines := r.gatherVBMH(sip)
machines, err := r.gatherVBMH(sip)
if err != nil {
//log.Error(err, "unable to gather vBMHs")
log.Error(err, "unable to gather vBMHs")
return ctrl.Result{}, err
}
@ -82,24 +82,22 @@ func (r *SIPClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error)
log.Error(err, "unable to finish creation/update ..")
return ctrl.Result{}, err
}
} else {
} else if containsString(sip.ObjectMeta.Finalizers, sipFinalizerName) {
// Deleting the SIP, what do we do now
if containsString(sip.ObjectMeta.Finalizers, sipFinalizerName) {
// our finalizer is present, so lets handle any external dependency
err := r.finalize(sip)
if err != nil {
log.Error(err, "unable to finalize")
return ctrl.Result{}, err
}
// remove our finalizer from the list and update it.
sip.ObjectMeta.Finalizers = removeString(sip.ObjectMeta.Finalizers, sipFinalizerName)
if err := r.Update(context.Background(), &sip); err != nil {
return ctrl.Result{}, err
}
// our finalizer is present, so lets handle any external dependency
err := r.finalize(sip)
if err != nil {
log.Error(err, "unable to finalize")
return ctrl.Result{}, err
}
// remove our finalizer from the list and update it.
sip.ObjectMeta.Finalizers = removeString(sip.ObjectMeta.Finalizers, sipFinalizerName)
if err := r.Update(context.Background(), &sip); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
@ -155,9 +153,9 @@ func removeString(slice []string, s string) (result []string) {
*/
// machines
func (r *SIPClusterReconciler) gatherVBMH(sip airshipv1.SIPCluster) (error, *airshipvms.MachineList) {
func (r *SIPClusterReconciler) gatherVBMH(sip airshipv1.SIPCluster) (*airshipvms.MachineList, error) {
// 1- Let me retrieve all BMH that are unlabeled or already labeled with the target Tenant/CNF
// 2- Let me now select the one's that meet teh scheduling criteria
// 2- Let me now select the one's that meet the scheduling criteria
// If I schedule successfully then
// If the schedule is not complete, then throw an error.
logger := r.Log.WithValues("SIPCluster", r.NamespacedName)
@ -171,7 +169,7 @@ func (r *SIPClusterReconciler) gatherVBMH(sip airshipv1.SIPCluster) (error, *air
logger.Info("gathering machines", "machines", machines.String())
err := machines.Schedule(sip, r.Client)
if err != nil {
return err, machines
return machines, err
}
// we extract the information in a generic way
@ -179,14 +177,13 @@ func (r *SIPClusterReconciler) gatherVBMH(sip airshipv1.SIPCluster) (error, *air
// If there are some issues finding information, the vBMH
// Are flagged Unschedulable
// Loop and try to find new vBMH to complete the schedule
//fmt.Printf("gatherVBMH.Extrapolate sip:%v machines:%v\n", sip, machines)
if machines.Extrapolate(sip, r.Client) {
logger.Info("successfuly extrapolated machines")
logger.Info("successfully extrapolated machines")
break
}
}
return nil, machines
return machines, nil
}
func (r *SIPClusterReconciler) deployInfra(sip airshipv1.SIPCluster, machines *airshipvms.MachineList) error {
@ -217,20 +214,14 @@ func (r *SIPClusterReconciler) deployInfra(sip airshipv1.SIPCluster, machines *a
finish should take care of any wrap-up tasks.
*/
func (r *SIPClusterReconciler) finish(sip airshipv1.SIPCluster, machines *airshipvms.MachineList) error {
// UnLabel the vBMH's
err := machines.ApplyLabels(sip, r.Client)
if err != nil {
return err
}
return nil
return machines.ApplyLabels(sip, r.Client)
}
/**
Deal with Deletion andd Finalizers if any is needed
Such as i'e what are we doing with the lables on teh vBMH's
Deal with Deletion and Finalizers if any is needed
Such as, what are we doing with the labels on the vBMH's
**/
func (r *SIPClusterReconciler) finalize(sip airshipv1.SIPCluster) error {
logger := r.Log.WithValues("SIPCluster", sip.GetNamespace()+"/"+sip.GetName())
@ -248,16 +239,19 @@ func (r *SIPClusterReconciler) finalize(sip airshipv1.SIPCluster) error {
}
}
// Clean Up common service stuff
airshipsvc.FinalizeCommon(sip, r.Client)
err := airshipsvc.FinalizeCommon(sip, r.Client)
if err != nil {
return err
}
// 1- Let me retrieve all vBMH mapped for this SIP Cluster
// 2- Let me now select the one's that meet teh scheduling criteria
// 2- Let me now select the one's that meet the scheduling criteria
// If I schedule successfully then
// If the schedule is not complete, then throw an error.
machines := &airshipvms.MachineList{}
logger.Info("finalize sip machines", "machines", machines.String())
// Update the list of Machines.
err := machines.GetCluster(sip, r.Client)
err = machines.GetCluster(sip, r.Client)
if err != nil {
return err
}

View File

@ -36,7 +36,7 @@ import (
var _ = Describe("SIPCluster controller", func() {
Context("When it detects a new SIPCluster", func() {
It("Should schedule available nodes", func() {
By("Labelling nodes")
By("Labeling nodes")
// Create vBMH test objects
nodes := []string{"master", "master", "master", "worker", "worker", "worker", "worker"}
@ -74,7 +74,7 @@ var _ = Describe("SIPCluster controller", func() {
})
It("Should not schedule nodes when there is an insufficient number of available master nodes", func() {
By("Not labelling any nodes")
By("Not labeling any nodes")
// Create vBMH test objects
nodes := []string{"master", "master", "worker", "worker", "worker", "worker"}
@ -111,7 +111,7 @@ var _ = Describe("SIPCluster controller", func() {
})
It("Should not schedule nodes when there is an insufficient number of available worker nodes", func() {
By("Not labelling any nodes")
By("Not labeling any nodes")
// Create vBMH test objects
nodes := []string{"master", "master", "master", "worker", "worker"}

View File

@ -20,6 +20,8 @@ import (
"path/filepath"
"testing"
airshipv1 "sipcluster/pkg/api/v1"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -31,7 +33,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
airshipv1 "sipcluster/pkg/api/v1"
// +kubebuilder:scaffold:imports
)

View File

@ -14,8 +14,6 @@
package services
import ()
// ErrInfraServiceNotSupported is returned when an unsupported InfraService is provided
type ErrInfraServiceNotSupported struct {
}

View File

@ -31,7 +31,7 @@ import (
// Init : prepares the Service
// Deploy : deploys the service
// Validate : will make sure that the deployment is successfull
// Validate : will make sure that the deployment is successful
type InfrastructureService interface {
//
Deploy(airshipv1.SIPCluster, *airshipvms.MachineList, client.Client) error
@ -49,10 +49,12 @@ func (s *Service) Deploy(sip airshipv1.SIPCluster, machines *airshipvms.MachineL
// do something, might decouple this a bit
// If the services are defined as a Helm Chart, then deploy might be simply
//. Lets make sure that teh namespace is in pace.
// Lets make sure that the namespace is in place.
// It will be called the name of the cluster.
s.createNS(sip.Spec.Config.ClusterName, c)
// Take the data from teh appropriate Machines
if err := s.createNS(sip.Spec.Config.ClusterName, c); err != nil {
return err
}
// Take the data from the appropriate Machines
// Prepare the Config
fmt.Printf("Deploy Service:%v \n", s.serviceName)
return nil
@ -83,14 +85,12 @@ func (s *Service) createNS(serviceNamespaceName string, c client.Client) error {
}
return nil
}
func (s *Service) Validate() error {
// do something, might decouple this a bit
fmt.Printf("Validate Service:%v \n", s.serviceName)
return nil
}
func (s *Service) Finalize(sip airshipv1.SIPCluster, c client.Client) error {
@ -112,16 +112,16 @@ func FinalizeCommon(sip airshipv1.SIPCluster, c client.Client) error {
}
return nil
}
// Service Factory
func NewService(infraName airshipv1.InfraService, infraCfg airshipv1.InfraConfig) (InfrastructureService, error) {
if infraName == airshipv1.LoadBalancerService {
switch infraName {
case airshipv1.LoadBalancerService:
return newLoadBalancer(infraCfg), nil
} else if infraName == airshipv1.JumpHostService {
case airshipv1.JumpHostService:
return newJumpHost(infraCfg), nil
} else if infraName == airshipv1.AuthHostService {
case airshipv1.AuthHostService:
return newAuthHost(infraCfg), nil
}
return nil, ErrInfraServiceNotSupported{}

View File

@ -33,9 +33,15 @@ func newJumpHost(infraCfg airshipv1.InfraConfig) InfrastructureService {
/*
The SIP Cluster operator will manufacture a jump host pod specifically for this tenant cluster. Much like we did above for master nodes by extracting IP addresses, we would need to extract the `oam-ipv4` ip address for all nodes and create a configmap to bind mount into the pod so it understands what host IPs represent the clusters.
The SIP Cluster operator will manufacture a jump host pod specifically for this
tenant cluster. Much like we did above for master nodes by extracting IP
addresses, we would need to extract the `oam-ipv4` ip address for all nodes and
create a configmap to bind mount into the pod so it understands what host IPs
represent the clusters.
The expectation is the Jump Pod runs `sshd` protected by `uam` to allow operators to SSH directly to the Jump Pod and authenticate via UAM to immediately access their cluster.
The expectation is the Jump Pod runs `sshd` protected by `uam` to allow
operators to SSH directly to the Jump Pod and authenticate via UAM to
immediately access their cluster.
It will provide the following functionality over SSH:
@ -46,6 +52,8 @@ It will provide the following functionality over SSH:
- A kubectl binary and kubeconfig (cluster-admin) for the cluster
- SSH access to the cluster node VMs
- Libvirt console logs for the VMs
- We will secure libvirt with tls and provide keys to every jump host with curated interfaces to extract logs remotely for all VMs for their clusters.
- We will secure libvirt with tls and provide keys to every jump host
with curated interfaces to extract logs remotely for all VMs for their
clusters.
*/
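A sketch of what that host-to-IP ConfigMap could look like (the name, key layout, and the second address are illustrative assumptions; only the `oam-ipv4` interface name, the per-cluster namespace, and the 32.68.51.139 address appear elsewhere in this change):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: subcluster-1-jump-host-hosts   # assumed naming convention
  namespace: subcluster-1              # SIP creates a namespace named after the cluster
data:
  hosts: |
    # node name -> oam-ipv4 address extracted from each BMH's networkData
    rdm8r008c002: 32.68.51.139
    rdm8r008c003: 32.68.51.140
```

The jump pod would bind-mount this ConfigMap so the SSH tooling inside it knows which IP each cluster node answers on.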

View File

@ -30,21 +30,21 @@ func (l *LoadBalancer) Deploy(sip airshipv1.SIPCluster, machines *airshipvms.Mac
// do something, might decouple this a bit
// If the services are defined as a Helm Chart, then deploy might be simply
// Take the data from teh appropriate Machines
// Take the data from the appropriate Machines
// Prepare the Config
l.Service.Deploy(sip, machines, c)
err := l.Prepare(sip, machines, c)
err := l.Service.Deploy(sip, machines, c)
if err != nil {
return err
}
return nil
return l.Prepare(sip, machines, c)
}
func (l *LoadBalancer) Prepare(sip airshipv1.SIPCluster, machines *airshipvms.MachineList, c client.Client) error {
fmt.Printf("%s.Prepare machines:%s \n", l.Service.serviceName, machines)
for _, machine := range machines.Machines {
if machine.VMRole == airshipv1.VmMaster {
fmt.Printf("%s.Prepare for machine:%s ip is %s\n", l.Service.serviceName, machine, machine.Data.IpOnInterface[sip.Spec.InfraServices[l.Service.serviceName].NodeInterface])
if machine.VMRole == airshipv1.VMMaster {
ip := machine.Data.IPOnInterface[sip.Spec.InfraServices[l.Service.serviceName].NodeInterface]
fmt.Printf("%s.Prepare for machine:%s ip is %s\n", l.Service.serviceName, machine, ip)
}
}
return nil
@ -63,11 +63,20 @@ func newLoadBalancer(infraCfg airshipv1.InfraConfig) InfrastructureService {
:::warning
For the loadbalanced interface a **static asignment** via network data is required. For now, we will not support updates to this field without manual intervention. In other words, there is no expectation that the SIP operator watches `BareMetalHost` objects and reacts to changes in the future. The expectation would instead to re-deliver the `SIPCluster` object to force a no-op update to load balancer configuration is updated.
For the loadbalanced interface a **static assignment** via network data is
required. For now, we will not support updates to this field without manual
intervention. In other words, there is no expectation that the SIP operator
watches `BareMetalHost` objects and reacts to changes in the future. The
expectation would instead be to re-deliver the `SIPCluster` object to force a
no-op update so that the load balancer configuration is regenerated.
:::
By extracting these IP address from the appropriate/defined interface for each master node, we can build our loadbalancer service endpoint list to feed to haproxy. In other words, the SIP Cluster will now manufacture an haproxy configuration file that directs traffic to all IP endpoints found above over port 6443. For example:
By extracting these IP addresses from the appropriate/defined interface for each
master node, we can build our loadbalancer service endpoint list to feed to
haproxy. In other words, the SIP Cluster will now manufacture an haproxy
configuration file that directs traffic to all IP endpoints found above over
port 6443. For example:
``` gotpl
@ -93,10 +102,14 @@ backend kube-apiservers
{% end %}
```
This will be saved as a configmap and mounted into the cluster specific haproxy daemonset across all undercloud control nodes.
This will be saved as a configmap and mounted into the cluster specific haproxy
daemonset across all undercloud control nodes.
We will then create a Kubernetes NodePort `Service` that will direct traffic on the infrastructure `nodePort` defined in the SIP Cluster definition to these haproxy workloads.
We will then create a Kubernetes NodePort `Service` that will direct traffic on
the infrastructure `nodePort` defined in the SIP Cluster definition to these
haproxy workloads.
At this point, the SIP Cluster controller can now label the VMs appropriately so they'll be scheduled by the Cluster-API process.
At this point, the SIP Cluster controller can now label the VMs appropriately
so they'll be scheduled by the Cluster-API process.
*/
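A sketch of that NodePort `Service` (the service name and the `app: haproxy` selector are assumptions; port 6443 and the 7000 node port come from the text above and the test fixtures in this change, and 7000 presumes the cluster's service node-port range allows it):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: subcluster-1-load-balancer   # assumed naming convention
  namespace: subcluster-1
spec:
  type: NodePort
  selector:
    app: haproxy                     # assumed label on the haproxy daemonset pods
  ports:
  - name: kube-apiserver
    port: 6443
    targetPort: 6443                 # assumes haproxy listens on 6443 inside the pod
    nodePort: 7000                   # the infrastructure nodePort from the SIPCluster definition
```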

View File

@ -14,23 +14,25 @@ func (e ErrorConstraintNotFound) Error() string {
}
type ErrorUnableToFullySchedule struct {
TargetNode airshipv1.VmRoles
TargetNode airshipv1.VMRoles
TargetFlavor string
}
func (e ErrorUnableToFullySchedule) Error() string {
return fmt.Sprintf("Unable to complete a schedule with a target of %v nodes, with a flavor of %v ", e.TargetNode, e.TargetFlavor)
return fmt.Sprintf("Unable to complete a schedule with a target of %v nodes, with a flavor of %v",
e.TargetNode, e.TargetFlavor)
}
type ErrorHostIpNotFound struct {
type ErrorHostIPNotFound struct {
HostName string
ServiceName airshipv1.InfraService
IPInterface string
Message string
}
func (e ErrorHostIpNotFound) Error() string {
return fmt.Sprintf("Unable to identify the vBMH Host %v IP address on interface %v required by Infrastructure Service %v %s ", e.HostName, e.IPInterface, e.ServiceName, e.Message)
func (e ErrorHostIPNotFound) Error() string {
return fmt.Sprintf("Unable to identify the vBMH Host %v IP address on interface %v required by "+
"Infrastructure Service %v %s ", e.HostName, e.IPInterface, e.ServiceName, e.Message)
}
// ErrorUknownSpreadTopology is returned when an unknown spread topology is provided

View File

@ -52,7 +52,7 @@ const (
// UnableToSchedule, This BMH has something wrong with it
// The BMH itself doesn't depict the error situation
// i.e. teh NetworkData is missing something
// i.e. the NetworkData is missing something
UnableToSchedule ScheduledState = "UnableToSchedule"
)
@ -78,7 +78,7 @@ const (
SipNodeTypeLabel = BaseAirshipSelector + "/" + SipNodeTypeLabelName
)
// MAchine represents an individual BMH CR, and teh appropriate
// Machine represents an individual BMH CR, and the appropriate
// attributes required to manage the SIP Cluster scheduling and
// processing needs about them
type Machine struct {
@ -87,23 +87,25 @@ type Machine struct {
// scheduleLabels
// I expect to build this over time / if not might not be needed
ScheduleLabels map[string]string
VMRole airshipv1.VmRoles
VMRole airshipv1.VMRoles
// Data will contain whatever information is needed from the server
// If it ends up being just the IP, then maybe we can collapse it into a field
Data *MachineData
}
func (m *Machine) String() string {
return fmt.Sprintf("Machine {\n\tBmh:%s\n\tScheduleStatus:%s\n\tVmRole:%v\n}\n", m.BMH.ObjectMeta.Name, m.ScheduleStatus, m.VMRole)
// TODO(howell): cleanup this manual marshaling
return fmt.Sprintf("Machine {\n\tBmh:%s\n\tScheduleStatus:%s\n\tVMRole:%v\n}\n",
m.BMH.ObjectMeta.Name, m.ScheduleStatus, m.VMRole)
}
func NewMachine(bmh metal3.BareMetalHost, nodeRole airshipv1.VmRoles, schedState ScheduledState) (m *Machine) {
func NewMachine(bmh metal3.BareMetalHost, nodeRole airshipv1.VMRoles, schedState ScheduledState) (m *Machine) {
return &Machine{
BMH: bmh,
ScheduleStatus: schedState,
VMRole: nodeRole,
Data: &MachineData{
IpOnInterface: make(map[string]string),
IPOnInterface: make(map[string]string),
},
}
}
@ -111,7 +113,7 @@ func NewMachine(bmh metal3.BareMetalHost, nodeRole airshipv1.VmRoles, schedState
type MachineData struct {
// Collect all IP's for the interfaces defined
// In the list of Services
IpOnInterface map[string]string
IPOnInterface map[string]string
}
// MachineList contains the list of Scheduled or ToBeScheduled machines
@ -120,12 +122,11 @@ type MachineList struct {
// ViNO Machines
Machines map[string]*Machine
// Keep track of how many we have marked for scheduling.
ReadyForScheduleCount map[airshipv1.VmRoles]int
ReadyForScheduleCount map[airshipv1.VMRoles]int
Log logr.Logger
}
func (ml *MachineList) hasMachine(bmh metal3.BareMetalHost) bool {
if &bmh == nil {
return false
}
@ -133,13 +134,11 @@ func (ml *MachineList) hasMachine(bmh metal3.BareMetalHost) bool {
}
func (ml *MachineList) String() string {
// TODO(howell): This output probably isn't formatted properly
var sb strings.Builder
for mName, machine := range ml.Machines {
sb.WriteString("[" + mName + "]:" + machine.String())
}
return sb.String()
}
@ -169,17 +168,17 @@ func (ml *MachineList) Schedule(sip airshipv1.SIPCluster, c client.Client) error
return nil
}
func (ml *MachineList) init(nodes map[airshipv1.VmRoles]airshipv1.NodeSet) {
func (ml *MachineList) init(nodes map[airshipv1.VMRoles]airshipv1.NodeSet) {
// Only Initialize 1st time
if len(ml.Machines) == 0 {
mlSize := 0
mlNodeTypes := 0
for _, nodeCfg := range nodes {
mlSize = mlSize + nodeCfg.Count.Active + nodeCfg.Count.Standby
mlNodeTypes = mlNodeTypes + 1
mlNodeTypes++
}
//fmt.Printf("Schedule.init mlSize:%d\n", mlSize)
ml.ReadyForScheduleCount = make(map[airshipv1.VmRoles]int, mlNodeTypes)
fmt.Printf("Schedule.init mlSize:%d\n", mlSize)
ml.ReadyForScheduleCount = make(map[airshipv1.VMRoles]int, mlNodeTypes)
ml.Machines = make(map[string]*Machine, 0)
}
}
@ -200,7 +199,7 @@ func (ml *MachineList) getBMHs(c client.Client) (*metal3.BareMetalHostList, erro
logger.Info("Getting all available BaremetalHosts that are not scheduled")
err := c.List(context.Background(), bmhList, client.MatchingLabels(scheduleLabels))
if err != nil {
logger.Info("Recieved an error while getting BaremetalHost list", "error", err.Error())
logger.Info("Received an error while getting BaremetalHost list", "error", err.Error())
return bmhList, err
}
logger.Info("Got a list of hosts", "BaremetalHostCount", len(bmhList.Items))
@ -210,7 +209,8 @@ func (ml *MachineList) getBMHs(c client.Client) (*metal3.BareMetalHostList, erro
return bmhList, fmt.Errorf("Unable to identify vBMH available for scheduling. Selecting %v ", scheduleLabels)
}
func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster, bmhList *metal3.BareMetalHostList, c client.Client) error {
func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster,
bmhList *metal3.BareMetalHostList, c client.Client) error {
logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName)
// If using the SIP Scheduled label, we now have a list of vBMH's
// that are not scheduled
@ -220,10 +220,10 @@ func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster, bmhList *metal3.B
// Only deals with AntiAffinity at :
// - Racks : Don't select two machines in the same rack
// - Server : Don't select two machines in the same server
logger.Info("Trying to identifiy BaremetalHosts that match scheduling parameters",
logger.Info("Trying to identify BaremetalHosts that match scheduling parameters",
"initial BMH count", len(bmhList.Items))
for nodeRole, nodeCfg := range sip.Spec.Nodes {
logger := logger.WithValues("role", nodeRole)
logger := logger.WithValues("role", nodeRole) //nolint:govet
ml.ReadyForScheduleCount[nodeRole] = 0
logger.Info("Getting host constraints")
scheduleSetMap, err := ml.initScheduleMaps(nodeRole, nodeCfg.Scheduling)
@ -239,7 +239,8 @@ func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster, bmhList *metal3.B
return nil
}
func (ml *MachineList) initScheduleMaps(role airshipv1.VmRoles, constraint airshipv1.SpreadTopology) (*ScheduleSet, error) {
func (ml *MachineList) initScheduleMaps(role airshipv1.VMRoles,
constraint airshipv1.SpreadTopology) (*ScheduleSet, error) {
logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", role, "spread topology", constraint)
var labelName string
switch constraint {
@ -258,10 +259,10 @@ func (ml *MachineList) initScheduleMaps(role airshipv1.VmRoles, constraint airsh
set: make(map[string]bool),
labelName: labelName,
}, nil
}
func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VmRoles, c client.Client, sipCfg *airshipv1.SipConfig) int {
func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VMRoles,
c client.Client, sipCfg *airshipv1.SipConfig) int {
bmhList := &metal3.BareMetalHostList{}
scheduleLabels := map[string]string{
@ -282,14 +283,14 @@ func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VmRoles
// With what is already there.
logger.Info("Got already scheduled BaremetalHosts from kubernetes", "count", len(bmhList.Items))
for _, bmh := range bmhList.Items {
logger := logger.WithValues("BMH name", bmh.GetName())
logger := logger.WithValues("BMH name", bmh.GetName()) //nolint:govet
readyScheduled := !ml.hasMachine(bmh)
logger.Info("Checking if BMH is already marked to be scheduled", "ready to be scheduled", readyScheduled)
if readyScheduled {
logger.Info("BMH host is not yet marked as ready to be scheduled, marking it as ready to be scheduled")
// Add it to the list.
ml.Machines[bmh.ObjectMeta.Name] = NewMachine(bmh, nodeRole, Scheduled)
ml.ReadyForScheduleCount[nodeRole] = ml.ReadyForScheduleCount[nodeRole] + 1
ml.ReadyForScheduleCount[nodeRole]++
}
}
// ReadyForScheduleCount should include:
@ -299,8 +300,9 @@ func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VmRoles
return ml.ReadyForScheduleCount[nodeRole]
}
func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.NodeSet, bmList *metal3.BareMetalHostList,
scheduleSet *ScheduleSet, c client.Client, sipCfg *airshipv1.SipConfig) error {
func (ml *MachineList) scheduleIt(nodeRole airshipv1.VMRoles, nodeCfg airshipv1.NodeSet,
bmList *metal3.BareMetalHostList, scheduleSet *ScheduleSet,
c client.Client, sipCfg *airshipv1.SipConfig) error {
logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", nodeRole)
validBmh := true
// Count the expectations stated in the CR
@ -314,19 +316,19 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.
if nodeTarget == 0 {
return nil
}
logger.Info("Checking list of BMH initialy received as not scheduled anywhere yet")
logger.Info("Checking list of BMH initially received as not scheduled anywhere yet")
for _, bmh := range bmList.Items {
logger := logger.WithValues("BaremetalHost Name", bmh.GetName())
logger := logger.WithValues("BaremetalHost Name", bmh.GetName()) //nolint:govet
if !ml.hasMachine(bmh) {
logger.Info("BaremetalHost not yet marked as ready to be scheduled")
constraint := nodeCfg.Scheduling
// Do I care about this constraint
logger := logger.WithValues("constraint", constraint)
logger := logger.WithValues("constraint", constraint) //nolint:govet
if scheduleSet.Active() {
logger.Info("constraint is active")
// Check if bmh has the label
bmhConstraintCondition, flavorMatch := scheduleSet.GetLabels(bmh.Labels, nodeCfg.VmFlavor)
bmhConstraintCondition, flavorMatch := scheduleSet.GetLabels(bmh.Labels, nodeCfg.VMFlavor)
logger.Info("Checked BMH constraint condition and flavor match",
"constraint condition", bmhConstraintCondition,
"flavor match", flavorMatch)
@ -336,7 +338,6 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.
// If it's in the list already for the constraint, then this bmh is disqualified. Skip it
if scheduleSet.Exists(bmhConstraintCondition) {
logger.Info("Constraint slot is alrady taken some BMH from this constraint is already allocated, skipping it")
validBmh = false
break
} else {
scheduleSet.Add(bmhConstraintCondition)
@ -348,9 +349,11 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.
if validBmh {
// Lets add it to the list as a schedulable thing
ml.Machines[bmh.ObjectMeta.Name] = NewMachine(bmh, nodeRole, ToBeScheduled)
ml.ReadyForScheduleCount[nodeRole] = ml.ReadyForScheduleCount[nodeRole] + 1
// TODO Probable should remove the bmh from the list so if there are other node targets they dont even take it into account
nodeTarget = nodeTarget - 1
ml.ReadyForScheduleCount[nodeRole]++
// TODO Probably should remove the bmh from the
// list so if there are other node targets they
// don't even take it into account
nodeTarget--
logger.Info("Marked node as ready to be scheduled", "BMH count to be scheduled", nodeTarget)
if nodeTarget == 0 {
break
@ -361,14 +364,13 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.
// ...
validBmh = true
}
}
if nodeTarget > 0 {
logger.Info("Failed to get enough BMHs to complete scheduling")
return ErrorUnableToFullySchedule{
TargetNode: nodeRole,
TargetFlavor: nodeCfg.VmFlavor,
TargetFlavor: nodeCfg.VMFlavor,
}
}
return nil
@ -380,17 +382,19 @@ func (ml *MachineList) Extrapolate(sip airshipv1.SIPCluster, c client.Client) bo
extrapolateSuccess := true
fmt.Printf("Schedule.Extrapolate ml.Vbmhs:%d\n", len(ml.Machines))
for _, machine := range ml.Machines {
fmt.Printf("Schedule.Extrapolate machine.Data.IpOnInterface len:%d machine:%v \n", len(machine.Data.IpOnInterface), machine)
fmt.Printf("Schedule.Extrapolate machine.Data.IPOnInterface len:%d machine:%v\n",
len(machine.Data.IPOnInterface), machine)
// Skip if I already extrapolated the data for this machine
if len(machine.Data.IpOnInterface) > 0 {
if len(machine.Data.IPOnInterface) > 0 {
continue
}
bmh := machine.BMH
// Identify Network Data Secret name
networkDataSecret := &corev1.Secret{}
//fmt.Printf("Schedule.Extrapolate Namespace:%s Name:%s\n", bmh.Spec.NetworkData.Namespace, bmh.Spec.NetworkData.Name)
fmt.Printf("Schedule.Extrapolate Namespace:%s Name:%s\n", bmh.Spec.NetworkData.Namespace,
bmh.Spec.NetworkData.Name)
// c is a created client.
err := c.Get(context.Background(), client.ObjectKey{
Namespace: bmh.Spec.NetworkData.Namespace,
@ -398,21 +402,20 @@ func (ml *MachineList) Extrapolate(sip airshipv1.SIPCluster, c client.Client) bo
}, networkDataSecret)
if err != nil {
machine.ScheduleStatus = UnableToSchedule
ml.ReadyForScheduleCount[machine.VMRole] = ml.ReadyForScheduleCount[machine.VMRole] - 1
ml.ReadyForScheduleCount[machine.VMRole]--
extrapolateSuccess = false
}
//fmt.Printf("Schedule.Extrapolate networkDataSecret:%v\n", networkDataSecret)
// Assuming there might be other data
// Retrieve IP's for Service defined Network Interfaces
err = ml.getIp(machine, networkDataSecret, sip.Spec.InfraServices)
err = ml.getIP(machine, networkDataSecret, sip.Spec.InfraServices)
if err != nil {
// Lets mark the machine as UnableToSchedule.
// Update the count of what I have found so far,
machine.ScheduleStatus = UnableToSchedule
ml.ReadyForScheduleCount[machine.VMRole] = ml.ReadyForScheduleCount[machine.VMRole] - 1
ml.ReadyForScheduleCount[machine.VMRole]--
extrapolateSuccess = false
}
}
fmt.Printf("Schedule.Extrapolate extrapolateSuccess:%t\n", extrapolateSuccess)
return extrapolateSuccess
@ -566,36 +569,40 @@ func (ml *MachineList) Extrapolate(sip airshipv1.SIPCluster, c client.Client) bo
}
***/
func (ml *MachineList) getIp(machine *Machine, networkDataSecret *corev1.Secret, infraServices map[airshipv1.InfraService]airshipv1.InfraConfig) error {
func (ml *MachineList) getIP(machine *Machine, networkDataSecret *corev1.Secret,
infraServices map[airshipv1.InfraService]airshipv1.InfraConfig) error {
var secretData interface{}
// Now I have the Secret
// Lets find the IP's for all Interfaces defined in Cfg
foundIp := false
foundIP := false
for svcName, svcCfg := range infraServices {
// Did I already find teh IP for these interface
if machine.Data.IpOnInterface[svcCfg.NodeInterface] == "" {
json.Unmarshal(networkDataSecret.Data["networkData"], &secretData)
//fmt.Printf("Schedule.Extrapolate.getIp secretData:%v\n", secretData)
// Did I already find the IP for this interface
if machine.Data.IPOnInterface[svcCfg.NodeInterface] == "" {
err := json.Unmarshal(networkDataSecret.Data["networkData"], &secretData)
if err != nil {
return err
}
fmt.Printf("Schedule.Extrapolate.getIP secretData:%v\n", secretData)
queryFilter := fmt.Sprintf("$..networks[? (@.id==\"%s\")].ip_address", svcCfg.NodeInterface)
//fmt.Printf("Schedule.Extrapolate.getIp queryFilter:%v\n", queryFilter)
ip_address, err := jsonpath.Get(queryFilter, secretData)
fmt.Printf("Schedule.Extrapolate.getIP queryFilter:%v\n", queryFilter)
ipAddress, err := jsonpath.Get(queryFilter, secretData)
if err == nil {
foundIp = true
for _, value := range ip_address.([]interface{}) {
machine.Data.IpOnInterface[svcCfg.NodeInterface] = value.(string)
foundIP = true
for _, value := range ipAddress.([]interface{}) {
machine.Data.IPOnInterface[svcCfg.NodeInterface] = value.(string) //nolint:errcheck
}
}
// Skip if error
// Should signal that I need to exclude this machine
// Which also means I am now short potentially.
fmt.Printf("Schedule.Extrapolate.getIp machine.Data.IpOnInterface[%s]:%v\n", svcCfg.NodeInterface, machine.Data.IpOnInterface[svcCfg.NodeInterface])
fmt.Printf("Schedule.Extrapolate.getIP machine.Data.IpOnInterface[%s]:%v\n",
svcCfg.NodeInterface, machine.Data.IPOnInterface[svcCfg.NodeInterface])
}
if !foundIp {
return &ErrorHostIpNotFound{
if !foundIP {
return &ErrorHostIPNotFound{
HostName: machine.BMH.ObjectMeta.Name,
ServiceName: svcName,
IPInterface: svcCfg.NodeInterface,
@ -612,7 +619,7 @@ func (ml *MachineList) getIp(machine *Machine, networkDataSecret *corev1.Secret,
type ScheduleSet struct {
// Defines if this set is actually active
active bool
// Holds list of elements in teh Set
// Holds list of elements in the Set
set map[string]bool
// Holds the label name that identifies the constraint
labelName string
@ -632,7 +639,7 @@ func (ss *ScheduleSet) Add(labelValue string) {
ss.set[labelValue] = true
}
func (ss *ScheduleSet) GetLabels(labels map[string]string, flavorLabel string) (string, bool) {
//fmt.Printf("Schedule.scheduleIt.GetLabels labels:%v, flavorLabel:%s\n", labels, flavorLabel)
fmt.Printf("Schedule.scheduleIt.GetLabels labels:%v, flavorLabel:%s\n", labels, flavorLabel)
if labels == nil {
return "", false
}
@ -646,12 +653,11 @@ func (ss *ScheduleSet) GetLabels(labels map[string]string, flavorLabel string) (
}
/*
ApplyLabel : marks the appropriate machine labels to teh vBMH's that
ApplyLabel : marks the appropriate machine labels to the vBMH's that
have been selected by the scheduling.
This is done only after the Infrastructure Services have been deployed
*/
func (ml *MachineList) ApplyLabels(sip airshipv1.SIPCluster, c client.Client) error {
fmt.Printf("ApplyLabels %s size:%d\n", ml.String(), len(ml.Machines))
for _, machine := range ml.Machines {
// Only Add Labels to Machines that are not marked to be scheduled
@ -677,10 +683,8 @@ func (ml *MachineList) ApplyLabels(sip airshipv1.SIPCluster, c client.Client) er
// RemoveLabels removes sip related labels
func (ml *MachineList) RemoveLabels(sip airshipv1.SIPCluster, c client.Client) error {
fmt.Printf("ApplyLabels %s size:%d\n", ml.String(), len(ml.Machines))
for _, machine := range ml.Machines {
bmh := &machine.BMH
fmt.Printf("RemoveLabels bmh.ObjectMeta.Name:%s\n", bmh.ObjectMeta.Name)
delete(bmh.Labels, SipClusterLabel)
@ -700,8 +704,7 @@ func (ml *MachineList) RemoveLabels(sip airshipv1.SIPCluster, c client.Client) e
}
func (ml *MachineList) GetCluster(sip airshipv1.SIPCluster, c client.Client) error {
// Initialize teh Target list
// Initialize the Target list
ml.init(sip.Spec.Nodes)
bmhList := &metal3.BareMetalHostList{}
@ -719,9 +722,9 @@ func (ml *MachineList) GetCluster(sip airshipv1.SIPCluster, c client.Client) err
ml.Machines[bmh.ObjectMeta.Name] = &Machine{
BMH: bmh,
ScheduleStatus: Scheduled,
VMRole: airshipv1.VmRoles(bmh.Labels[SipNodeTypeLabel]),
VMRole: airshipv1.VMRoles(bmh.Labels[SipNodeTypeLabel]),
Data: &MachineData{
IpOnInterface: make(map[string]string),
IPOnInterface: make(map[string]string),
},
}
}

View File

@ -25,7 +25,7 @@ var _ = Describe("MachineList", func() {
nodes := map[string]*Machine{}
for n := 0; n < numNodes; n++ {
bmh, _ := testutil.CreateBMH(n, "default", "master", 6)
nodes[bmh.Name] = NewMachine(*bmh, airshipv1.VmMaster, NotScheduled)
nodes[bmh.Name] = NewMachine(*bmh, airshipv1.VMMaster, NotScheduled)
}
machineList = &MachineList{
@ -105,19 +105,19 @@ var _ = Describe("MachineList", func() {
Namespace: "default",
},
Machines: map[string]*Machine{
bmh.Name: NewMachine(*bmh, airshipv1.VmMaster, NotScheduled),
bmh.Name: NewMachine(*bmh, airshipv1.VMMaster, NotScheduled),
},
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}
sipCluster := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster.Spec.InfraServices = map[airshipv1.InfraService]airshipv1.InfraConfig{
airshipv1.LoadBalancerService: airshipv1.InfraConfig{
airshipv1.LoadBalancerService: {
Image: "haproxy:latest",
NodeLabels: map[string]string{
"test": "true",
},
NodePorts: []int{7000, 7001, 7002},
NodeInterface: "oam-ipv4",
},
}
@ -125,7 +125,7 @@ var _ = Describe("MachineList", func() {
Expect(ml.Extrapolate(*sipCluster, k8sClient)).To(BeTrue())
// NOTE(drewwalters96): Interface data is b64 encoded in the testutil convenience function.
Expect(ml.Machines[bmh.Name].Data.IpOnInterface).To(Equal(map[string]string{"oam-ipv4": "32.68.51.139"}))
Expect(ml.Machines[bmh.Name].Data.IPOnInterface).To(Equal(map[string]string{"oam-ipv4": "32.68.51.139"}))
})
It("Should not retrieve the BMH IP from the BMH's NetworkData secret if no infraServices are defined", func() {
@ -142,23 +142,22 @@ var _ = Describe("MachineList", func() {
Namespace: "default",
},
Machines: map[string]*Machine{
bmh.Name: NewMachine(*bmh, airshipv1.VmMaster, NotScheduled),
bmh.Name: NewMachine(*bmh, airshipv1.VMMaster, NotScheduled),
},
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}
k8sClient := mockClient.NewFakeClient(objs...)
sipCluster := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
Expect(ml.Extrapolate(*sipCluster, k8sClient)).To(BeTrue())
Expect(len(ml.Machines[bmh.Name].Data.IpOnInterface)).To(Equal(0))
Expect(len(ml.Machines[bmh.Name].Data.IPOnInterface)).To(Equal(0))
})
It("Should not retrieve the BMH IP if it has been previously extrapolated", func() {
// Store an IP address for each machine
var objs []runtime.Object
for _, machine := range machineList.Machines {
machine.Data.IpOnInterface = map[string]string{
machine.Data.IPOnInterface = map[string]string{
"oam-ipv4": "32.68.51.139",
}
objs = append(objs, &machine.BMH)

View File

@ -163,7 +163,7 @@ const (
}`
)
// CreateBMH initializes a BaremetalHost with specific parameteres for use in test cases.
// CreateBMH initializes a BaremetalHost with specific parameters for use in test cases.
func CreateBMH(node int, namespace string, role string, rack int) (*metal3.BareMetalHost, *corev1.Secret) {
rackLabel := fmt.Sprintf("r%d", rack)
networkDataName := fmt.Sprintf("node%d-network-data", node)
@ -211,19 +211,19 @@ func CreateSIPCluster(name string, namespace string, masters int, workers int) *
Config: &airshipv1.SipConfig{
ClusterName: name,
},
Nodes: map[airshipv1.VmRoles]airshipv1.NodeSet{
airshipv1.VmMaster: airshipv1.NodeSet{
VmFlavor: "airshipit.org/vino-flavor=master",
Nodes: map[airshipv1.VMRoles]airshipv1.NodeSet{
airshipv1.VMMaster: {
VMFlavor: "airshipit.org/vino-flavor=master",
Scheduling: airshipv1.ServerAntiAffinity,
Count: &airshipv1.VmCount{
Count: &airshipv1.VMCount{
Active: masters,
Standby: 0,
},
},
airshipv1.VmWorker: airshipv1.NodeSet{
VmFlavor: "airshipit.org/vino-flavor=worker",
airshipv1.VMWorker: {
VMFlavor: "airshipit.org/vino-flavor=worker",
Scheduling: airshipv1.ServerAntiAffinity,
Count: &airshipv1.VmCount{
Count: &airshipv1.VMCount{
Active: workers,
Standby: 0,
},