Add service controller

- Add a service controller in stackube and create lbaas v2 pools for new
  services, also add members for endpoints.
- Fix getting network for system namespaces.

Change-Id: I7942a2d26dd33b4ceb75ec51c03933205a60aea7
Implements: blueprint service-loadbalancer
Signed-off-by: Pengfei Ni <feiskyer@gmail.com>

parent 1b3a7cf0ae
commit c847f5e5a5
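As context for the diff below, here is a minimal sketch of the LBaaS v2 object chain the new controller manages (loadbalancer -> listener -> pool -> members), using the same gophercloud calls the new pkg/openstack/loadbalancer.go uses. The service name, subnet, tenant, ports and member addresses are illustrative only; the real EnsureLoadBalancer additionally waits for the load balancer to go ACTIVE between steps, creates a TCP health monitor, and associates a floating IP.

package lbsketch

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
)

// createServiceLB sketches the chain built for one LoadBalancer-type service.
// network is an authenticated Neutron client; all literal values are examples.
func createServiceLB(network *gophercloud.ServiceClient, subnetID, tenantID string) error {
	// 1. The load balancer itself, one per service.
	lb, err := loadbalancers.Create(network, loadbalancers.CreateOpts{
		Name:        "stackube_default_nginx",
		VipSubnetID: subnetID,
		TenantID:    tenantID,
	}).Extract()
	if err != nil {
		return err
	}

	// 2. One TCP listener on the service port.
	listener, err := listeners.Create(network, listeners.CreateOpts{
		LoadbalancerID: lb.ID,
		Protocol:       listeners.ProtocolTCP,
		ProtocolPort:   80,
		TenantID:       tenantID,
	}).Extract()
	if err != nil {
		return err
	}

	// 3. One round-robin pool per listener.
	pool, err := pools.Create(network, pools.CreateOpts{
		ListenerID: listener.ID,
		Protocol:   pools.ProtocolTCP,
		LBMethod:   pools.LBMethodRoundRobin,
		TenantID:   tenantID,
	}).Extract()
	if err != nil {
		return err
	}

	// 4. Each service endpoint becomes a pool member.
	for _, addr := range []string{"10.244.1.5", "10.244.1.6"} {
		_, err := pools.CreateMember(network, pool.ID, pools.CreateMemberOpts{
			Address:      addr,
			ProtocolPort: 8080,
			SubnetID:     subnetID,
		}).Extract()
		if err != nil {
			return err
		}
	}

	return nil
}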
@ -11,12 +11,15 @@ import (
"git.openstack.org/openstack/stackube/pkg/auth-controller/tenant"
"git.openstack.org/openstack/stackube/pkg/network-controller"
"git.openstack.org/openstack/stackube/pkg/openstack"
"git.openstack.org/openstack/stackube/pkg/service-controller"
"git.openstack.org/openstack/stackube/pkg/util"

extclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/kubernetes"

"github.com/golang/glog"
"github.com/spf13/pflag"
"golang.org/x/sync/errgroup"
"k8s.io/client-go/kubernetes"
)
|
||||
|
||||
var (
|
||||
@ -28,26 +31,28 @@ var (
|
||||
userGateway = pflag.String("user-gateway", "10.244.0.1", "user Pod network gateway")
|
||||
)
|
||||
|
||||
func startControllers(kubeconfig, cloudconfig string) error {
|
||||
func startControllers(kubeClient *kubernetes.Clientset,
|
||||
osClient *openstack.Client, kubeExtClient *extclientset.Clientset) error {
|
||||
// Creates a new Tenant controller
|
||||
tc, err := tenant.NewTenantController(kubeconfig, cloudconfig)
|
||||
tenantController, err := tenant.NewTenantController(kubeClient, osClient, kubeExtClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Creates a new Network controller
|
||||
nc, err := network.NewNetworkController(
|
||||
kubeconfig, cloudconfig)
|
||||
networkController, err := network.NewNetworkController(osClient, kubeExtClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Creates a new RBAC controller
|
||||
rm, err := rbacmanager.NewRBACController(kubeconfig,
|
||||
tc.GetKubeCRDClient(),
|
||||
*userCIDR,
|
||||
*userGateway,
|
||||
)
|
||||
rbacController, err := rbacmanager.NewRBACController(kubeClient, osClient.CRDClient, *userCIDR, *userGateway)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Creates a new service controller
|
||||
serviceController, err := service.NewServiceController(kubeClient, osClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -56,11 +61,14 @@ func startControllers(kubeconfig, cloudconfig string) error {
|
||||
wg, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
// start auth controllers in stackube
|
||||
wg.Go(func() error { return tc.Run(ctx.Done()) })
|
||||
wg.Go(func() error { return rm.Run(ctx.Done()) })
|
||||
wg.Go(func() error { return tenantController.Run(ctx.Done()) })
|
||||
wg.Go(func() error { return rbacController.Run(ctx.Done()) })
|
||||
|
||||
// start network controller
|
||||
wg.Go(func() error { return nc.Run(ctx.Done()) })
|
||||
wg.Go(func() error { return networkController.Run(ctx.Done()) })
|
||||
|
||||
// start service controller
|
||||
wg.Go(func() error { return serviceController.Run(ctx.Done()) })
|
||||
|
||||
term := make(chan os.Signal)
|
||||
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
|
||||
@ -80,23 +88,28 @@ func startControllers(kubeconfig, cloudconfig string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func verifyClientSetting() error {
|
||||
func initClients() (*kubernetes.Clientset, *openstack.Client, *extclientset.Clientset, error) {
|
||||
// Create kubernetes client config. Use kubeconfig if given, otherwise assume in-cluster.
|
||||
config, err := util.NewClusterConfig(*kubeconfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Init kubernetes cluster failed: %v", err)
|
||||
return nil, nil, nil, fmt.Errorf("failed to build kubeconfig: %v", err)
|
||||
}
|
||||
|
||||
_, err = kubernetes.NewForConfig(config)
|
||||
kubeClient, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Init kubernetes clientset failed: %v", err)
|
||||
return nil, nil, nil, fmt.Errorf("failed to create kubernetes clientset: %v", err)
|
||||
}
|
||||
|
||||
_, err = openstack.NewClient(*cloudconfig, *kubeconfig)
|
||||
kubeExtClient, err := extclientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Init openstack client failed: %v", err)
|
||||
return nil, nil, nil, fmt.Errorf("failed to create kubernetes apiextensions clientset: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
// Create OpenStack client from config file.
|
||||
osClient, err := openstack.NewClient(*cloudconfig, *kubeconfig)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("could't initialize openstack client: %v", err)
|
||||
}
|
||||
|
||||
return kubeClient, osClient, kubeExtClient, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
@ -104,14 +117,14 @@ func main() {
|
||||
util.InitLogs()
|
||||
defer util.FlushLogs()
|
||||
|
||||
// Verify client setting at the beginning and fail early if there are errors.
|
||||
err := verifyClientSetting()
|
||||
// Initialize kubernetes and openstack clients.
|
||||
kubeClient, osClient, kubeExtClient, err := initClients()
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
|
||||
// Start stackube controllers.
|
||||
if err := startControllers(*kubeconfig, *cloudconfig); err != nil {
|
||||
if err := startControllers(kubeClient, osClient, kubeExtClient); err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
@ -34,22 +34,10 @@ type Controller struct {
|
||||
}
|
||||
|
||||
// New creates a new RBAC controller.
|
||||
func NewRBACController(kubeconfig string,
|
||||
kubeCRDClient *crdClient.CRDClient,
|
||||
userCIDR string,
|
||||
userGateway string,
|
||||
) (*Controller, error) {
|
||||
cfg, err := util.NewClusterConfig(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("init cluster config failed: %v", err)
|
||||
}
|
||||
client, err := kubernetes.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("init kubernetes client failed: %v", err)
|
||||
}
|
||||
|
||||
func NewRBACController(kubeClient *kubernetes.Clientset, kubeCRDClient *crdClient.CRDClient, userCIDR string,
|
||||
userGateway string) (*Controller, error) {
|
||||
o := &Controller{
|
||||
kclient: client,
|
||||
kclient: kubeClient,
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "rbacmanager"),
|
||||
kubeCRDClient: kubeCRDClient,
|
||||
userCIDR: userCIDR,
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
crdClient "git.openstack.org/openstack/stackube/pkg/kubecrd"
|
||||
"git.openstack.org/openstack/stackube/pkg/openstack"
|
||||
|
||||
"git.openstack.org/openstack/stackube/pkg/util"
|
||||
"github.com/golang/glog"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
@ -27,38 +26,19 @@ type TenantController struct {
|
||||
}
|
||||
|
||||
// NewTenantController creates a new tenant controller.
|
||||
func NewTenantController(kubeconfig, cloudconfig string) (*TenantController, error) {
|
||||
// Create the client config. Use kubeconfig if given, otherwise assume in-cluster.
|
||||
config, err := util.NewClusterConfig(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build kubeconfig: %v", err)
|
||||
}
|
||||
clientset, err := apiextensionsclient.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create kubeclient from config: %v", err)
|
||||
}
|
||||
|
||||
func NewTenantController(kubeClient *kubernetes.Clientset,
|
||||
osClient *openstack.Client,
|
||||
kubeExtClient *apiextensionsclient.Clientset) (*TenantController, error) {
|
||||
// initialize CRD if it does not exist
|
||||
_, err = crdClient.CreateTenantCRD(clientset)
|
||||
_, err := crdClient.CreateTenantCRD(kubeExtClient)
|
||||
if err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
return nil, fmt.Errorf("failed to create CRD to kube-apiserver: %v", err)
|
||||
}
|
||||
|
||||
k8sClient, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create kubernetes client: %v", err)
|
||||
}
|
||||
|
||||
// Create OpenStack client from config
|
||||
openStackClient, err := openstack.NewClient(cloudconfig, kubeconfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("init openstack client failed: %v", err)
|
||||
}
|
||||
|
||||
c := &TenantController{
|
||||
kubeCRDClient: openStackClient.CRDClient,
|
||||
k8sClient: k8sClient,
|
||||
openstackClient: openStackClient,
|
||||
kubeCRDClient: osClient.CRDClient,
|
||||
k8sClient: kubeClient,
|
||||
openstackClient: osClient,
|
||||
}
|
||||
|
||||
if err = c.createClusterRoles(); err != nil {
|
||||
|
@ -3,6 +3,7 @@ package network
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@ -11,75 +12,58 @@ import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
crv1 "git.openstack.org/openstack/stackube/pkg/apis/v1"
|
||||
crdClient "git.openstack.org/openstack/stackube/pkg/kubecrd"
|
||||
osDriver "git.openstack.org/openstack/stackube/pkg/openstack"
|
||||
"git.openstack.org/openstack/stackube/pkg/kubecrd"
|
||||
"git.openstack.org/openstack/stackube/pkg/openstack"
|
||||
"git.openstack.org/openstack/stackube/pkg/util"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// NetworkController watches for create/update/delete events on Network resources.
|
||||
type NetworkController struct {
|
||||
kubeCRDClient *crdClient.CRDClient
|
||||
driver *osDriver.Client
|
||||
kubeCRDClient *kubecrd.CRDClient
|
||||
driver *openstack.Client
|
||||
networkInformer cache.Controller
|
||||
}
|
||||
|
||||
func (c *NetworkController) GetKubeCRDClient() *crdClient.CRDClient {
|
||||
func (c *NetworkController) GetKubeCRDClient() *kubecrd.CRDClient {
|
||||
return c.kubeCRDClient
|
||||
}
|
||||
|
||||
func (c *NetworkController) Run(stopCh <-chan struct{}) error {
|
||||
defer utilruntime.HandleCrash()
|
||||
|
||||
source := cache.NewListWatchFromClient(
|
||||
c.kubeCRDClient.Client,
|
||||
crv1.NetworkResourcePlural,
|
||||
apiv1.NamespaceAll,
|
||||
fields.Everything())
|
||||
|
||||
_, networkInformer := cache.NewInformer(
|
||||
source,
|
||||
&crv1.Network{},
|
||||
0,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: c.onAdd,
|
||||
UpdateFunc: c.onUpdate,
|
||||
DeleteFunc: c.onDelete,
|
||||
})
|
||||
|
||||
go networkInformer.Run(stopCh)
|
||||
go c.networkInformer.Run(stopCh)
|
||||
<-stopCh
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewNetworkController(kubeconfig, openstackConfigFile string) (*NetworkController, error) {
|
||||
// Create the client config. Use kubeconfig if given, otherwise assume in-cluster.
|
||||
config, err := util.NewClusterConfig(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build kubeconfig: %v", err)
|
||||
}
|
||||
clientset, err := apiextensionsclient.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create kubeclient from config: %v", err)
|
||||
}
|
||||
|
||||
func NewNetworkController(osClient *openstack.Client, kubeExtClient *apiextensionsclient.Clientset) (*NetworkController, error) {
|
||||
// initialize CRD if it does not exist
|
||||
_, err = crdClient.CreateNetworkCRD(clientset)
|
||||
_, err := kubecrd.CreateNetworkCRD(kubeExtClient)
|
||||
if err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
return nil, fmt.Errorf("failed to create CRD to kube-apiserver: %v", err)
|
||||
}
|
||||
|
||||
// Create OpenStack client from config
|
||||
openstack, err := osDriver.NewClient(openstackConfigFile, kubeconfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Couldn't initialize openstack: %#v", err)
|
||||
}
|
||||
|
||||
source := cache.NewListWatchFromClient(
|
||||
osClient.CRDClient.Client,
|
||||
crv1.NetworkResourcePlural,
|
||||
apiv1.NamespaceAll,
|
||||
fields.Everything())
|
||||
networkController := &NetworkController{
|
||||
kubeCRDClient: openstack.CRDClient,
|
||||
driver: openstack,
|
||||
kubeCRDClient: osClient.CRDClient,
|
||||
driver: osClient,
|
||||
}
|
||||
_, networkInformer := cache.NewInformer(
|
||||
source,
|
||||
&crv1.Network{},
|
||||
0,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: networkController.onAdd,
|
||||
UpdateFunc: networkController.onUpdate,
|
||||
DeleteFunc: networkController.onDelete,
|
||||
})
|
||||
networkController.networkInformer = networkInformer
|
||||
|
||||
return networkController, nil
|
||||
}
|
||||
|
||||
|
pkg/openstack/loadbalancer.go (new file, 766 lines)
@ -0,0 +1,766 @@
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
|
||||
"github.com/gophercloud/gophercloud/pagination"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMonitorDelay = 10
|
||||
defaultMonitorRetry = 3
|
||||
defaultMonitorTimeout = 3
|
||||
|
||||
// loadbalancerActive* is configuration of exponential backoff for
|
||||
// going into ACTIVE loadbalancer provisioning status. Starting with 1
|
||||
// second, multiplying by 1.2 with each step and taking 19 steps at maximum
|
||||
// it will time out after 128s, which roughly corresponds to 120s
|
||||
loadbalancerActiveInitDelay = 1 * time.Second
|
||||
loadbalancerActiveFactor = 1.2
|
||||
loadbalancerActiveSteps = 19
|
||||
|
||||
// loadbalancerDelete* is configuration of exponential backoff for
|
||||
// waiting for delete operation to complete. Starting with 1
|
||||
// second, multiplying by 1.2 with each step and taking 13 steps at maximum
|
||||
// it will time out after 32s, which roughly corresponds to 30s
|
||||
loadbalancerDeleteInitDelay = 1 * time.Second
|
||||
loadbalancerDeleteFactor = 1.2
|
||||
loadbalancerDeleteSteps = 13
|
||||
|
||||
activeStatus = "ACTIVE"
|
||||
errorStatus = "ERROR"
|
||||
)
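The timeout figures quoted in the comments above come from summing the geometric series of sleep intervals: with an initial delay of 1s and factor 1.2, n sleeps add up to roughly 1s · (1.2ⁿ − 1)/(1.2 − 1). Assuming about one sleep between attempts, 18 sleeps give 1s · (1.2¹⁸ − 1)/0.2 ≈ 128s for the ACTIVE wait, and around 11–12 sleeps give the ~32s quoted for the delete wait; the exact count depends on how wait.ExponentialBackoff handles its final step.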
|
||||
|
||||
// LoadBalancer contains all essential information of kubernetes service.
|
||||
type LoadBalancer struct {
|
||||
Name string
|
||||
ServicePort int
|
||||
TenantID string
|
||||
SubnetID string
|
||||
Protocol string
|
||||
InternalIP string
|
||||
ExternalIP string
|
||||
SessionAffinity bool
|
||||
Endpoints []Endpoint
|
||||
}
|
||||
|
||||
// Endpoint represents a container endpoint.
|
||||
type Endpoint struct {
|
||||
Address string
|
||||
Port int
|
||||
}
|
||||
|
||||
// LoadBalancerStatus contains the status of a load balancer.
|
||||
type LoadBalancerStatus struct {
|
||||
InternalIP string
|
||||
ExternalIP string
|
||||
}
|
||||
|
||||
// EnsureLoadBalancer ensures a load balancer is created.
|
||||
func (os *Client) EnsureLoadBalancer(lb *LoadBalancer) (*LoadBalancerStatus, error) {
|
||||
// Reuse the load balancer if it already exists; otherwise create a new one.
|
||||
loadbalancer, err := os.getLoadBalanceByName(lb.Name)
|
||||
if err != nil {
|
||||
if isNotFound(err) {
|
||||
// create a new one.
|
||||
lbOpts := loadbalancers.CreateOpts{
|
||||
Name: lb.Name,
|
||||
Description: "Stackube service",
|
||||
VipSubnetID: lb.SubnetID,
|
||||
TenantID: lb.TenantID,
|
||||
}
|
||||
loadbalancer, err = loadbalancers.Create(os.Network, lbOpts).Extract()
|
||||
if err != nil {
|
||||
glog.Errorf("Create load balancer %q failed: %v", lb.Name, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
glog.V(3).Infof("LoadBalancer %s already exists", lb.Name)
|
||||
}
|
||||
|
||||
status, err := os.waitLoadBalancerStatus(loadbalancer.ID)
|
||||
if err != nil {
|
||||
glog.Errorf("Waiting for load balancer provision failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Load balancer %q becomes %q", lb.Name, status)
|
||||
|
||||
// get old listeners
|
||||
var listener *listeners.Listener
|
||||
oldListeners, err := os.getListenersByLoadBalancerID(loadbalancer.ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting LB %s listeners: %v", loadbalancer.Name, err)
|
||||
}
|
||||
for i := range oldListeners {
|
||||
l := oldListeners[i]
|
||||
if l.ProtocolPort == lb.ServicePort {
|
||||
listener = &l
|
||||
} else {
|
||||
// delete the obsolete listener
|
||||
if err := os.ensureListenerDeleted(loadbalancer.ID, l); err != nil {
|
||||
return nil, fmt.Errorf("error deleting listener %q: %v", l.Name, err)
|
||||
}
|
||||
os.waitLoadBalancerStatus(loadbalancer.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// create the listener.
|
||||
if listener == nil {
|
||||
lisOpts := listeners.CreateOpts{
|
||||
LoadbalancerID: loadbalancer.ID,
|
||||
// Only tcp is supported now.
|
||||
Protocol: listeners.ProtocolTCP,
|
||||
ProtocolPort: lb.ServicePort,
|
||||
TenantID: lb.TenantID,
|
||||
Name: lb.Name,
|
||||
}
|
||||
listener, err = listeners.Create(os.Network, lisOpts).Extract()
|
||||
if err != nil {
|
||||
glog.Errorf("Create listener %q failed: %v", lb.Name, err)
|
||||
return nil, err
|
||||
}
|
||||
os.waitLoadBalancerStatus(loadbalancer.ID)
|
||||
}
|
||||
|
||||
// create the load balancer pool.
|
||||
pool, err := os.getPoolByListenerID(loadbalancer.ID, listener.ID)
|
||||
if err != nil && !isNotFound(err) {
|
||||
return nil, fmt.Errorf("error getting pool for listener %q: %v", listener.ID, err)
|
||||
}
|
||||
if pool == nil {
|
||||
poolOpts := pools.CreateOpts{
|
||||
Name: lb.Name,
|
||||
ListenerID: listener.ID,
|
||||
Protocol: pools.ProtocolTCP,
|
||||
LBMethod: pools.LBMethodRoundRobin,
|
||||
TenantID: lb.TenantID,
|
||||
}
|
||||
if lb.SessionAffinity {
|
||||
poolOpts.Persistence = &pools.SessionPersistence{Type: "SOURCE_IP"}
|
||||
}
|
||||
pool, err = pools.Create(os.Network, poolOpts).Extract()
|
||||
if err != nil {
|
||||
glog.Errorf("Create pool %q failed: %v", lb.Name, err)
|
||||
return nil, err
|
||||
}
|
||||
os.waitLoadBalancerStatus(loadbalancer.ID)
|
||||
}
|
||||
|
||||
// create load balancer members.
|
||||
members, err := os.getMembersByPoolID(pool.ID)
|
||||
if err != nil && !isNotFound(err) {
|
||||
return nil, fmt.Errorf("error getting members for pool %q: %v", pool.ID, err)
|
||||
}
|
||||
for _, ep := range lb.Endpoints {
|
||||
if !memberExists(members, ep.Address, ep.Port) {
|
||||
memberName := fmt.Sprintf("%s-%s-%d", lb.Name, ep.Address, ep.Port)
|
||||
_, err = pools.CreateMember(os.Network, pool.ID, pools.CreateMemberOpts{
|
||||
Name: memberName,
|
||||
ProtocolPort: ep.Port,
|
||||
Address: ep.Address,
|
||||
SubnetID: lb.SubnetID,
|
||||
}).Extract()
|
||||
if err != nil {
|
||||
glog.Errorf("Create member %q failed: %v", memberName, err)
|
||||
return nil, err
|
||||
}
|
||||
os.waitLoadBalancerStatus(loadbalancer.ID)
|
||||
} else {
|
||||
members = popMember(members, ep.Address, ep.Port)
|
||||
}
|
||||
}
|
||||
// delete obsolete members
|
||||
for _, member := range members {
|
||||
glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID,
|
||||
pool.ID, member.Address)
|
||||
err := pools.DeleteMember(os.Network, pool.ID, member.ID).ExtractErr()
|
||||
if err != nil && !isNotFound(err) {
|
||||
return nil, fmt.Errorf("error deleting member %s for pool %s address %s: %v",
|
||||
member.ID, pool.ID, member.Address, err)
|
||||
}
|
||||
}
|
||||
|
||||
// create loadbalancer monitor.
|
||||
if pool.MonitorID == "" {
|
||||
_, err = monitors.Create(os.Network, monitors.CreateOpts{
|
||||
Name: lb.Name,
|
||||
Type: monitors.TypeTCP,
|
||||
PoolID: pool.ID,
|
||||
TenantID: lb.TenantID,
|
||||
Delay: defaultMonitorDelay,
|
||||
Timeout: defaultMonitorTimeout,
|
||||
MaxRetries: defaultMonitorRetry,
|
||||
}).Extract()
|
||||
if err != nil {
|
||||
glog.Errorf("Create monitor for pool %q failed: %v", pool.ID, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// associate external IP for the vip.
|
||||
fip, err := os.associateFloatingIP(lb.TenantID, loadbalancer.VipPortID, lb.ExternalIP)
|
||||
if err != nil {
|
||||
glog.Errorf("associateFloatingIP for port %q failed: %v", loadbalancer.VipPortID, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &LoadBalancerStatus{
|
||||
InternalIP: loadbalancer.VipAddress,
|
||||
ExternalIP: fip,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetLoadBalancer gets a load balancer by name.
|
||||
func (os *Client) GetLoadBalancer(name string) (*LoadBalancer, error) {
|
||||
// get load balancer
|
||||
lb, err := os.getLoadBalanceByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// get listener
|
||||
listener, err := os.getListenerByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// get members
|
||||
endpoints := make([]Endpoint, 0)
|
||||
for _, pool := range listener.Pools {
|
||||
for _, m := range pool.Members {
|
||||
endpoints = append(endpoints, Endpoint{
|
||||
Address: m.Address,
|
||||
Port: m.ProtocolPort,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return &LoadBalancer{
|
||||
Name: lb.Name,
|
||||
ServicePort: listener.ProtocolPort,
|
||||
TenantID: lb.TenantID,
|
||||
SubnetID: lb.VipSubnetID,
|
||||
Protocol: listener.Protocol,
|
||||
InternalIP: lb.VipAddress,
|
||||
SessionAffinity: listener.Pools[0].Persistence.Type != "",
|
||||
Endpoints: endpoints,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LoadBalancerExist returns whether a load balancer with the given name already exists.
|
||||
func (os *Client) LoadBalancerExist(name string) (bool, error) {
|
||||
_, err := os.getLoadBalanceByName(name)
|
||||
if err != nil {
|
||||
if isNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// EnsureLoadBalancerDeleted ensures a load balancer is deleted.
|
||||
func (os *Client) EnsureLoadBalancerDeleted(name string) error {
|
||||
// get load balancer
|
||||
lb, err := os.getLoadBalanceByName(name)
|
||||
if err != nil {
|
||||
if isNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// delete floatingip
|
||||
floatingIP, err := os.getFloatingIPByPortID(lb.VipPortID)
|
||||
if err != nil && !isNotFound(err) {
|
||||
return fmt.Errorf("error getting floating ip by port %q: %v", lb.VipPortID, err)
|
||||
}
|
||||
if floatingIP != nil {
|
||||
err = floatingips.Delete(os.Network, floatingIP.ID).ExtractErr()
|
||||
if err != nil && !isNotFound(err) {
|
||||
return fmt.Errorf("error deleting floating ip %q: %v", floatingIP.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// get listeners and their associated pools and members
|
||||
var poolIDs []string
|
||||
var monitorIDs []string
|
||||
var memberIDs []string
|
||||
listenerList, err := os.getListenersByLoadBalancerID(lb.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error getting load balancer %s listeners: %v", lb.ID, err)
|
||||
}
|
||||
|
||||
for _, listener := range listenerList {
|
||||
pool, err := os.getPoolByListenerID(lb.ID, listener.ID)
|
||||
if err != nil && !isNotFound(err) {
|
||||
return fmt.Errorf("error getting pool for listener %s: %v", listener.ID, err)
|
||||
}
|
||||
poolIDs = append(poolIDs, pool.ID)
|
||||
if pool.MonitorID != "" {
|
||||
monitorIDs = append(monitorIDs, pool.MonitorID)
|
||||
}
|
||||
}
|
||||
for _, pool := range poolIDs {
|
||||
membersList, err := os.getMembersByPoolID(pool)
|
||||
if err != nil && !isNotFound(err) {
|
||||
return fmt.Errorf("Error getting pool members %s: %v", pool, err)
|
||||
}
|
||||
for _, member := range membersList {
|
||||
memberIDs = append(memberIDs, member.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// delete all monitors
|
||||
for _, monitorID := range monitorIDs {
|
||||
err := monitors.Delete(os.Network, monitorID).ExtractErr()
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
os.waitLoadBalancerStatus(lb.ID)
|
||||
}
|
||||
|
||||
// delete all members and pools
|
||||
for _, poolID := range poolIDs {
|
||||
// delete all members for this pool
|
||||
for _, memberID := range memberIDs {
|
||||
err := pools.DeleteMember(os.Network, poolID, memberID).ExtractErr()
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
os.waitLoadBalancerStatus(lb.ID)
|
||||
}
|
||||
|
||||
// delete pool
|
||||
err := pools.Delete(os.Network, poolID).ExtractErr()
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
os.waitLoadBalancerStatus(lb.ID)
|
||||
}
|
||||
|
||||
// delete all listeners
|
||||
for _, listener := range listenerList {
|
||||
err := listeners.Delete(os.Network, listener.ID).ExtractErr()
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
os.waitLoadBalancerStatus(lb.ID)
|
||||
}
|
||||
|
||||
// delete the load balancer
|
||||
err = loadbalancers.Delete(os.Network, lb.ID).ExtractErr()
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
os.waitLoadBalancerStatus(lb.ID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (os *Client) ensureListenerDeleted(loadbalancerID string, listener listeners.Listener) error {
|
||||
for _, pool := range listener.Pools {
|
||||
for _, member := range pool.Members {
|
||||
// delete member
|
||||
if err := pools.DeleteMember(os.Network, pool.ID, member.ID).ExtractErr(); err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
os.waitLoadBalancerStatus(loadbalancerID)
|
||||
}
|
||||
|
||||
// delete monitor
|
||||
if err := monitors.Delete(os.Network, pool.MonitorID).ExtractErr(); err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
os.waitLoadBalancerStatus(loadbalancerID)
|
||||
|
||||
// delete pool
|
||||
if err := pools.Delete(os.Network, pool.ID).ExtractErr(); err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
os.waitLoadBalancerStatus(loadbalancerID)
|
||||
}
|
||||
|
||||
// delete listener
|
||||
if err := listeners.Delete(os.Network, listener.ID).ExtractErr(); err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
os.waitLoadBalancerStatus(loadbalancerID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (os *Client) waitLoadBalancerStatus(loadbalancerID string) (string, error) {
|
||||
backoff := wait.Backoff{
|
||||
Duration: loadbalancerActiveInitDelay,
|
||||
Factor: loadbalancerActiveFactor,
|
||||
Steps: loadbalancerActiveSteps,
|
||||
}
|
||||
|
||||
var provisioningStatus string
|
||||
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
|
||||
loadbalancer, err := loadbalancers.Get(os.Network, loadbalancerID).Extract()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
provisioningStatus = loadbalancer.ProvisioningStatus
|
||||
if loadbalancer.ProvisioningStatus == activeStatus {
|
||||
return true, nil
|
||||
} else if loadbalancer.ProvisioningStatus == errorStatus {
|
||||
return true, fmt.Errorf("Loadbalancer has gone into ERROR state")
|
||||
} else {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("Loadbalancer failed to go into ACTIVE provisioning status within alloted time")
|
||||
}
|
||||
return provisioningStatus, err
|
||||
}
|
||||
|
||||
func (os *Client) waitLoadbalancerDeleted(loadbalancerID string) error {
|
||||
backoff := wait.Backoff{
|
||||
Duration: loadbalancerDeleteInitDelay,
|
||||
Factor: loadbalancerDeleteFactor,
|
||||
Steps: loadbalancerDeleteSteps,
|
||||
}
|
||||
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
|
||||
_, err := loadbalancers.Get(os.Network, loadbalancerID).Extract()
|
||||
if err != nil {
|
||||
if err == ErrNotFound {
|
||||
return true, nil
|
||||
} else {
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
return false, nil
|
||||
}
|
||||
})
|
||||
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("Loadbalancer failed to delete within the alloted time")
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (os *Client) getListenersByLoadBalancerID(id string) ([]listeners.Listener, error) {
|
||||
var existingListeners []listeners.Listener
|
||||
err := listeners.List(os.Network, listeners.ListOpts{LoadbalancerID: id}).EachPage(func(page pagination.Page) (bool, error) {
|
||||
listenerList, err := listeners.ExtractListeners(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, l := range listenerList {
|
||||
for _, lb := range l.Loadbalancers {
|
||||
if lb.ID == id {
|
||||
existingListeners = append(existingListeners, l)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return existingListeners, nil
|
||||
}
|
||||
|
||||
func (os *Client) getLoadBalanceByName(name string) (*loadbalancers.LoadBalancer, error) {
|
||||
var lb *loadbalancers.LoadBalancer
|
||||
|
||||
opts := loadbalancers.ListOpts{Name: name}
|
||||
pager := loadbalancers.List(os.Network, opts)
|
||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
lbs, err := loadbalancers.ExtractLoadBalancers(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
switch len(lbs) {
|
||||
case 0:
|
||||
return false, ErrNotFound
|
||||
case 1:
|
||||
lb = &lbs[0]
|
||||
return true, nil
|
||||
default:
|
||||
return false, ErrMultipleResults
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if lb == nil {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
|
||||
return lb, nil
|
||||
}
|
||||
|
||||
func (os *Client) getPoolByListenerID(loadbalancerID string, listenerID string) (*pools.Pool, error) {
|
||||
listenerPools := make([]pools.Pool, 0, 1)
|
||||
err := pools.List(os.Network, pools.ListOpts{LoadbalancerID: loadbalancerID}).EachPage(
|
||||
func(page pagination.Page) (bool, error) {
|
||||
poolsList, err := pools.ExtractPools(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, p := range poolsList {
|
||||
for _, l := range p.Listeners {
|
||||
if l.ID == listenerID {
|
||||
listenerPools = append(listenerPools, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(listenerPools) > 1 {
|
||||
return false, ErrMultipleResults
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
if isNotFound(err) {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(listenerPools) == 0 {
|
||||
return nil, ErrNotFound
|
||||
} else if len(listenerPools) > 1 {
|
||||
return nil, ErrMultipleResults
|
||||
}
|
||||
|
||||
return &listenerPools[0], nil
|
||||
}
|
||||
|
||||
// getPoolByName gets openstack pool by name.
|
||||
func (os *Client) getPoolByName(name string) (*pools.Pool, error) {
|
||||
var pool *pools.Pool
|
||||
|
||||
opts := pools.ListOpts{Name: name}
|
||||
pager := pools.List(os.Network, opts)
|
||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
ps, err := pools.ExtractPools(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
switch len(ps) {
|
||||
case 0:
|
||||
return false, ErrNotFound
|
||||
case 1:
|
||||
pool = &ps[0]
|
||||
return true, nil
|
||||
default:
|
||||
return false, ErrMultipleResults
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if pool == nil {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
|
||||
return pool, nil
|
||||
}
|
||||
|
||||
func (os *Client) getListenerByName(name string) (*listeners.Listener, error) {
|
||||
var listener *listeners.Listener
|
||||
|
||||
opts := listeners.ListOpts{Name: name}
|
||||
pager := listeners.List(os.Network, opts)
|
||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
lists, err := listeners.ExtractListeners(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
switch len(lists) {
|
||||
case 0:
|
||||
return false, ErrNotFound
|
||||
case 1:
|
||||
listener = &lists[0]
|
||||
return true, nil
|
||||
default:
|
||||
return false, ErrMultipleResults
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if listener == nil {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
|
||||
return listener, nil
|
||||
}
|
||||
|
||||
func (os *Client) getMembersByPoolID(id string) ([]pools.Member, error) {
|
||||
var members []pools.Member
|
||||
err := pools.ListMembers(os.Network, id, pools.ListMembersOpts{}).EachPage(func(page pagination.Page) (bool, error) {
|
||||
membersList, err := pools.ExtractMembers(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
members = append(members, membersList...)
|
||||
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return members, nil
|
||||
}
|
||||
|
||||
func (os *Client) getFloatingIPByPortID(portID string) (*floatingips.FloatingIP, error) {
|
||||
opts := floatingips.ListOpts{
|
||||
PortID: portID,
|
||||
}
|
||||
pager := floatingips.List(os.Network, opts)
|
||||
|
||||
floatingIPList := make([]floatingips.FloatingIP, 0, 1)
|
||||
|
||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
f, err := floatingips.ExtractFloatingIPs(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
floatingIPList = append(floatingIPList, f...)
|
||||
if len(floatingIPList) > 1 {
|
||||
return false, ErrMultipleResults
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
if isNotFound(err) {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(floatingIPList) == 0 {
|
||||
return nil, ErrNotFound
|
||||
} else if len(floatingIPList) > 1 {
|
||||
return nil, ErrMultipleResults
|
||||
}
|
||||
|
||||
return &floatingIPList[0], nil
|
||||
}
|
||||
|
||||
// memberExists checks whether a pool member with the given address and port exists.
|
||||
func memberExists(members []pools.Member, addr string, port int) bool {
|
||||
for _, member := range members {
|
||||
if member.Address == addr && member.ProtocolPort == port {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (os *Client) associateFloatingIP(tenantID, portID, floatingIPAddress string) (string, error) {
|
||||
var fip *floatingips.FloatingIP
|
||||
opts := floatingips.ListOpts{FloatingIP: floatingIPAddress}
|
||||
pager := floatingips.List(os.Network, opts)
|
||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
floatingipList, err := floatingips.ExtractFloatingIPs(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(floatingipList) > 0 {
|
||||
fip = &floatingipList[0]
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if fip != nil {
|
||||
if fip.PortID != "" {
|
||||
if fip.PortID == portID {
|
||||
glog.V(3).Infof("FIP %q has already been associated with port %q", floatingIPAddress, portID)
|
||||
return fip.FloatingIP, nil
|
||||
}
|
||||
// fip has already been used
|
||||
return fip.FloatingIP, fmt.Errorf("FloatingIP %v has already been bound to %v", floatingIPAddress, fip.PortID)
|
||||
}
|
||||
|
||||
// Update floatingip
|
||||
floatOpts := floatingips.UpdateOpts{PortID: &portID}
|
||||
_, err = floatingips.Update(os.Network, fip.ID, floatOpts).Extract()
|
||||
if err != nil {
|
||||
glog.Errorf("Bind floatingip %v to %v failed: %v", floatingIPAddress, portID, err)
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
// Create floatingip
|
||||
opts := floatingips.CreateOpts{
|
||||
FloatingNetworkID: os.ExtNetID,
|
||||
TenantID: tenantID,
|
||||
FloatingIP: floatingIPAddress,
|
||||
PortID: portID,
|
||||
}
|
||||
fip, err = floatingips.Create(os.Network, opts).Extract()
|
||||
if err != nil {
|
||||
glog.Errorf("Create floatingip failed: %v", err)
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
return fip.FloatingIP, nil
|
||||
}
|
||||
|
||||
func popMember(members []pools.Member, addr string, port int) []pools.Member {
|
||||
for i, member := range members {
|
||||
if member.Address == addr && member.ProtocolPort == port {
|
||||
members[i] = members[len(members)-1]
|
||||
members = members[:len(members)-1]
|
||||
}
|
||||
}
|
||||
|
||||
return members
|
||||
}
|
||||
|
||||
func isNotFound(err error) bool {
|
||||
if err == ErrNotFound {
|
||||
return true
|
||||
}
|
||||
|
||||
if _, ok := err.(*gophercloud.ErrResourceNotFound); ok {
|
||||
return true
|
||||
}
|
||||
|
||||
if _, ok := err.(*gophercloud.ErrDefault404); ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
@ -197,15 +197,6 @@ func getServiceHealthCheckNodePort(service *v1.Service) int32 {
|
||||
return service.Spec.HealthCheckNodePort
|
||||
}
|
||||
|
||||
func loadBalancerStatusDeepCopy(lb *v1.LoadBalancerStatus) *v1.LoadBalancerStatus {
|
||||
c := &v1.LoadBalancerStatus{}
|
||||
c.Ingress = make([]v1.LoadBalancerIngress, len(lb.Ingress))
|
||||
for i := range lb.Ingress {
|
||||
c.Ingress[i] = lb.Ingress[i]
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func getRouterNetns(routerID string) string {
|
||||
return "qrouter-" + routerID
|
||||
}
|
||||
|
@ -24,7 +24,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSyncPerios = 15 * time.Minute
|
||||
defaultResyncPeriod = 15 * time.Minute
|
||||
minSyncPeriod = 5 * time.Second
|
||||
syncPeriod = 30 * time.Second
|
||||
burstSyncs = 2
|
||||
@ -81,7 +81,7 @@ func NewProxier(kubeConfig, openstackConfig string) (*Proxier, error) {
|
||||
return nil, fmt.Errorf("failed to build clientset: %v", err)
|
||||
}
|
||||
|
||||
factory := informers.NewSharedInformerFactory(clientset, defaultSyncPerios)
|
||||
factory := informers.NewSharedInformerFactory(clientset, defaultResyncPeriod)
|
||||
proxier := &Proxier{
|
||||
kubeClientset: clientset,
|
||||
osClient: osClient,
|
||||
@ -488,7 +488,7 @@ func (p *Proxier) syncProxyRules() {
|
||||
// Step 3: compose iptables chain.
|
||||
netns := getRouterNetns(nsInfo.router)
|
||||
if !netnsExist(netns) {
|
||||
glog.V(3).Infof("Netns %q doesn't exist, omit the service")
|
||||
glog.V(3).Infof("Netns %q doesn't exist, omit the services in namespace %q", netns, namespace)
|
||||
continue
|
||||
}
|
||||
|
||||
@ -522,8 +522,8 @@ func (p *Proxier) syncProxyRules() {
|
||||
svcNameString := svcInfo.serviceNameString
|
||||
|
||||
// Step 5.1: check service type.
|
||||
// Only ClusterIP service is supported now.
|
||||
// TODO: support other types of services.
|
||||
// Only ClusterIP service is supported. NodePort service is not supported since networks are L2 isolated.
|
||||
// LoadBalancer service is handled in service controller.
|
||||
if svcInfo.serviceType != v1.ServiceTypeClusterIP {
|
||||
glog.V(3).Infof("Only ClusterIP service is supported, omitting service %q", svcName.NamespacedName)
|
||||
continue
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"git.openstack.org/openstack/stackube/pkg/util"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@ -93,7 +94,7 @@ func newServiceInfo(svcPortName servicePortName, port *v1.ServicePort, service *
|
||||
nodePort: int(port.NodePort),
|
||||
serviceType: service.Spec.Type,
|
||||
// Deep-copy in case the service instance changes
|
||||
loadBalancerStatus: *loadBalancerStatusDeepCopy(&service.Status.LoadBalancer),
|
||||
loadBalancerStatus: *util.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer),
|
||||
sessionAffinityType: service.Spec.SessionAffinity,
|
||||
stickyMaxAgeMinutes: 180,
|
||||
externalIPs: make([]string, len(service.Spec.ExternalIPs)),
|
||||
|
pkg/service-controller/helpers.go (new file, 19 lines)
@ -0,0 +1,19 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
lbPrefix = "stackube"
|
||||
)
|
||||
|
||||
func buildServiceName(service *v1.Service) string {
|
||||
return fmt.Sprintf("%s_%s", service.Namespace, service.Name)
|
||||
}
|
||||
|
||||
func buildLoadBalancerName(service *v1.Service) string {
|
||||
return fmt.Sprintf("%s_%s_%s", lbPrefix, service.Namespace, service.Name)
|
||||
}
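For illustration (service name and namespace are hypothetical), a Service "nginx" in namespace "default" gives:

buildServiceName(svc)      // "default_nginx"
buildLoadBalancerName(svc) // "stackube_default_nginx"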
|
pkg/service-controller/service_controller.go (new file, 671 lines)
@ -0,0 +1,671 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
informersV1 "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
|
||||
"git.openstack.org/openstack/stackube/pkg/openstack"
|
||||
"git.openstack.org/openstack/stackube/pkg/util"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
// Interval of synchronizing service status from apiserver
|
||||
serviceSyncPeriod = 30 * time.Second
|
||||
resyncPeriod = 5 * time.Minute
|
||||
|
||||
// How long to wait before retrying the processing of a service change.
|
||||
minRetryDelay = 5 * time.Second
|
||||
maxRetryDelay = 300 * time.Second
|
||||
|
||||
clientRetryCount = 5
|
||||
clientRetryInterval = 5 * time.Second
|
||||
concurrentServiceSyncs = 1
|
||||
retryable = true
|
||||
notRetryable = false
|
||||
|
||||
doNotRetry = time.Duration(0)
|
||||
)
|
||||
|
||||
type cachedService struct {
|
||||
// The cached state of the service
|
||||
state *v1.Service
|
||||
// Controls error back-off
|
||||
lastRetryDelay time.Duration
|
||||
}
|
||||
|
||||
type serviceCache struct {
|
||||
mu sync.Mutex // protects serviceMap
|
||||
serviceMap map[string]*cachedService
|
||||
}
|
||||
|
||||
type ServiceController struct {
|
||||
cache *serviceCache
|
||||
|
||||
kubeClientset *kubernetes.Clientset
|
||||
osClient *openstack.Client
|
||||
factory informers.SharedInformerFactory
|
||||
serviceInformer informersV1.ServiceInformer
|
||||
endpointInformer informersV1.EndpointsInformer
|
||||
|
||||
// services that need to be synced
|
||||
workingQueue workqueue.DelayingInterface
|
||||
}
|
||||
|
||||
// NewServiceController returns a new service controller to keep openstack lbaas resources
|
||||
// (load balancers) in sync with the registry.
|
||||
func NewServiceController(kubeClient *kubernetes.Clientset,
|
||||
osClient *openstack.Client) (*ServiceController, error) {
|
||||
factory := informers.NewSharedInformerFactory(kubeClient, resyncPeriod)
|
||||
s := &ServiceController{
|
||||
osClient: osClient,
|
||||
factory: factory,
|
||||
kubeClientset: kubeClient,
|
||||
cache: &serviceCache{serviceMap: make(map[string]*cachedService)},
|
||||
workingQueue: workqueue.NewNamedDelayingQueue("service"),
|
||||
serviceInformer: factory.Core().V1().Services(),
|
||||
endpointInformer: factory.Core().V1().Endpoints(),
|
||||
}
|
||||
|
||||
s.serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: s.enqueueService,
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
oldSvc, ok1 := old.(*v1.Service)
|
||||
curSvc, ok2 := cur.(*v1.Service)
|
||||
if ok1 && ok2 && s.needsUpdate(oldSvc, curSvc) {
|
||||
s.enqueueService(cur)
|
||||
}
|
||||
},
|
||||
DeleteFunc: s.enqueueService,
|
||||
},
|
||||
serviceSyncPeriod,
|
||||
)
|
||||
|
||||
s.endpointInformer.Informer().AddEventHandlerWithResyncPeriod(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: s.enqueueEndpoints,
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
oldEndpoint, ok1 := old.(*v1.Endpoints)
|
||||
curEndpoint, ok2 := cur.(*v1.Endpoints)
|
||||
if ok1 && ok2 && s.needsUpdateEndpoint(oldEndpoint, curEndpoint) {
|
||||
s.enqueueEndpoints(cur)
|
||||
}
|
||||
},
|
||||
DeleteFunc: s.enqueueEndpoints,
|
||||
},
|
||||
serviceSyncPeriod,
|
||||
)
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// obj could be an *v1.Service, or a DeletionFinalStateUnknown marker item.
|
||||
func (s *ServiceController) enqueueService(obj interface{}) {
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||
if err != nil {
|
||||
glog.Errorf("Couldn't get key for object %#v: %v", obj, err)
|
||||
return
|
||||
}
|
||||
s.workingQueue.Add(key)
|
||||
}
|
||||
|
||||
// Run starts a background goroutine that watches for changes to services that
|
||||
// have (or had) LoadBalancers=true and ensures that they have
|
||||
// load balancers created and deleted appropriately.
|
||||
// serviceSyncPeriod controls how often we check the cluster's services to
|
||||
// ensure that the correct load balancers exist.
|
||||
//
|
||||
// It's an error to call Run() more than once for a given ServiceController
|
||||
// object.
|
||||
func (s *ServiceController) Run(stopCh <-chan struct{}) error {
|
||||
defer runtime.HandleCrash()
|
||||
defer s.workingQueue.ShutDown()
|
||||
|
||||
glog.Info("Starting service controller")
|
||||
defer glog.Info("Shutting down service controller")
|
||||
|
||||
go s.factory.Start(stopCh)
|
||||
|
||||
if !cache.WaitForCacheSync(stopCh, s.serviceInformer.Informer().HasSynced) {
|
||||
return fmt.Errorf("failed to cache services")
|
||||
}
|
||||
if !cache.WaitForCacheSync(stopCh, s.endpointInformer.Informer().HasSynced) {
|
||||
return fmt.Errorf("failed to cache endpoints")
|
||||
}
|
||||
|
||||
glog.Infof("Service informer cached")
|
||||
|
||||
for i := 0; i < concurrentServiceSyncs; i++ {
|
||||
go wait.Until(s.worker, time.Second, stopCh)
|
||||
}
|
||||
|
||||
<-stopCh
|
||||
return nil
|
||||
}
|
||||
|
||||
// obj could be an *v1.Endpoint, or a DeletionFinalStateUnknown marker item.
|
||||
func (s *ServiceController) enqueueEndpoints(obj interface{}) {
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||
if err != nil {
|
||||
glog.Errorf("Couldn't get key for object %#v: %v", obj, err)
|
||||
return
|
||||
}
|
||||
s.workingQueue.Add(key)
|
||||
}
|
||||
|
||||
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
|
||||
// It enforces that the syncHandler is never invoked concurrently with the same key.
|
||||
func (s *ServiceController) worker() {
|
||||
for {
|
||||
func() {
|
||||
key, quit := s.workingQueue.Get()
|
||||
if quit {
|
||||
return
|
||||
}
|
||||
defer s.workingQueue.Done(key)
|
||||
err := s.syncService(key.(string))
|
||||
if err != nil {
|
||||
glog.Errorf("Error syncing service: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Returns an error if processing the service update failed, along with a time.Duration
|
||||
// indicating whether processing should be retried; zero means no-retry; otherwise
|
||||
// we should retry in that Duration.
|
||||
func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *v1.Service,
|
||||
key string) (error, time.Duration) {
|
||||
// cache the service, we need the info for service deletion
|
||||
cachedService.state = service
|
||||
err, retry := s.createLoadBalancerIfNeeded(key, service)
|
||||
if err != nil {
|
||||
message := "Error creating load balancer"
|
||||
if retry {
|
||||
message += " (will retry): "
|
||||
} else {
|
||||
message += " (will not retry): "
|
||||
}
|
||||
message += err.Error()
|
||||
glog.V(3).Infof("Create service %q failed: %v, message: %q", buildServiceName(service), err, message)
|
||||
|
||||
return err, cachedService.nextRetryDelay()
|
||||
}
|
||||
// Always update the cache upon success.
|
||||
// NOTE: Since we update the cached service if and only if we successfully
|
||||
// processed it, a cached service being nil implies that it hasn't yet
|
||||
// been successfully processed.
|
||||
s.cache.set(key, cachedService)
|
||||
|
||||
cachedService.resetRetryDelay()
|
||||
return nil, doNotRetry
|
||||
}
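cachedService.nextRetryDelay() and resetRetryDelay() are used above but their definitions fall outside the hunks shown here. A minimal sketch, assuming the usual doubling scheme bounded by minRetryDelay/maxRetryDelay (as in the upstream Kubernetes service controller), would look like this:

// Hypothetical sketch; the actual definitions live later in service_controller.go.
func (s *cachedService) nextRetryDelay() time.Duration {
	s.lastRetryDelay = s.lastRetryDelay * 2
	if s.lastRetryDelay < minRetryDelay {
		s.lastRetryDelay = minRetryDelay
	}
	if s.lastRetryDelay > maxRetryDelay {
		s.lastRetryDelay = maxRetryDelay
	}
	return s.lastRetryDelay
}

func (s *cachedService) resetRetryDelay() {
	s.lastRetryDelay = time.Duration(0)
}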
|
||||
|
||||
// Returns whatever error occurred along with a boolean indicator of whether it should be retried.
|
||||
func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.Service) (error, bool) {
|
||||
// Save the state so we can avoid a write if it doesn't change
|
||||
previousState := util.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
|
||||
var newState *v1.LoadBalancerStatus
|
||||
var err error
|
||||
|
||||
lbName := buildLoadBalancerName(service)
|
||||
if !wantsLoadBalancer(service) {
|
||||
needDelete := true
|
||||
exists, err := s.osClient.LoadBalancerExist(lbName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error getting LB for service %s: %v", key, err), retryable
|
||||
}
|
||||
if !exists {
|
||||
needDelete = false
|
||||
}
|
||||
|
||||
if needDelete {
|
||||
glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key)
|
||||
if err := s.osClient.EnsureLoadBalancerDeleted(lbName); err != nil {
|
||||
glog.Errorf("EnsureLoadBalancerDeleted %q failed: %v", lbName, err)
|
||||
return err, retryable
|
||||
}
|
||||
}
|
||||
|
||||
newState = &v1.LoadBalancerStatus{}
|
||||
} else {
|
||||
glog.V(2).Infof("Ensuring LB for service %s", key)
|
||||
|
||||
// The load balancer doesn't exist yet, so create it.
|
||||
newState, err = s.createLoadBalancer(service)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create load balancer for service %s: %v", key, err), retryable
|
||||
}
|
||||
glog.V(3).Infof("LoadBalancer %q created", lbName)
|
||||
}
|
||||
|
||||
// Write the state if changed
|
||||
// TODO: Be careful here ... what if there were other changes to the service?
|
||||
if !util.LoadBalancerStatusEqual(previousState, newState) {
|
||||
// Make a copy so we don't mutate the shared informer cache
|
||||
copy, err := scheme.Scheme.DeepCopy(service)
|
||||
if err != nil {
|
||||
return err, retryable
|
||||
}
|
||||
service = copy.(*v1.Service)
|
||||
|
||||
// Update the status on the copy
|
||||
service.Status.LoadBalancer = *newState
|
||||
|
||||
if err := s.persistUpdate(service); err != nil {
|
||||
return fmt.Errorf("Failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable
|
||||
}
|
||||
} else {
|
||||
glog.V(2).Infof("Not persisting unchanged LoadBalancerStatus for service %s to kubernetes.", key)
|
||||
}
|
||||
|
||||
return nil, notRetryable
|
||||
}
|
||||
|
||||
func (s *ServiceController) persistUpdate(service *v1.Service) error {
|
||||
var err error
|
||||
for i := 0; i < clientRetryCount; i++ {
|
||||
_, err = s.kubeClientset.CoreV1Client.Services(service.Namespace).UpdateStatus(service)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
// If the object no longer exists, we don't want to recreate it. Just bail
|
||||
// out so that we can process the delete, which we should soon be receiving
|
||||
// if we haven't already.
|
||||
if errors.IsNotFound(err) {
|
||||
glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
|
||||
service.Namespace, service.Name, err)
|
||||
return nil
|
||||
}
|
||||
// TODO: Try to resolve the conflict if the change was unrelated to load
|
||||
// balancer status. For now, just pass it up the stack.
|
||||
if errors.IsConflict(err) {
|
||||
return fmt.Errorf("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
|
||||
service.Namespace, service.Name, err)
|
||||
}
|
||||
glog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v",
|
||||
service.Namespace, service.Name, err)
|
||||
time.Sleep(clientRetryInterval)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *ServiceController) createLoadBalancer(service *v1.Service) (*v1.LoadBalancerStatus, error) {
|
||||
// Only one protocol and one external IP are supported per service.
|
||||
if len(service.Spec.ExternalIPs) > 1 {
|
||||
return nil, fmt.Errorf("multiple floatingips are not supported")
|
||||
}
|
||||
if len(service.Spec.Ports) > 1 {
|
||||
return nil, fmt.Errorf("multiple floatingips are not supported")
|
||||
}
|
||||
|
||||
// Only support one network and network's name is same with namespace.
|
||||
networkName := util.BuildNetworkName(service.Namespace, service.Namespace)
|
||||
network, err := s.osClient.GetNetwork(networkName)
|
||||
if err != nil {
|
||||
glog.Errorf("Get network by name %q failed: %v", networkName, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// get endpoints for the service.
|
||||
endpoints, err := s.getEndpoints(service)
|
||||
if err != nil {
|
||||
glog.Errorf("Get endpoints for service %q failed: %v", buildServiceName(service), err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create the loadbalancer.
|
||||
lbName := buildLoadBalancerName(service)
|
||||
svcPort := service.Spec.Ports[0]
|
||||
externalIP := ""
|
||||
if len(service.Spec.ExternalIPs) > 0 {
|
||||
externalIP = service.Spec.ExternalIPs[0]
|
||||
}
|
||||
lb, err := s.osClient.EnsureLoadBalancer(&openstack.LoadBalancer{
|
||||
Name: lbName,
|
||||
Endpoints: endpoints,
|
||||
ServicePort: int(svcPort.Port),
|
||||
TenantID: network.TenantID,
|
||||
SubnetID: network.Subnets[0].Uid,
|
||||
Protocol: string(svcPort.Protocol),
|
||||
ExternalIP: externalIP,
|
||||
SessionAffinity: service.Spec.SessionAffinity != v1.ServiceAffinityNone,
|
||||
})
|
||||
if err != nil {
|
||||
glog.Errorf("EnsureLoadBalancer %q failed: %v", lbName, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.LoadBalancerStatus{
|
||||
Ingress: []v1.LoadBalancerIngress{{IP: lb.ExternalIP}},
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
func (s *ServiceController) getEndpoints(service *v1.Service) ([]openstack.Endpoint, error) {
|
||||
endpoints, err := s.kubeClientset.CoreV1Client.Endpoints(service.Namespace).Get(service.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
results := make([]openstack.Endpoint, 0)
|
||||
for i := range endpoints.Subsets {
|
||||
ep := endpoints.Subsets[i]
|
||||
for _, ip := range ep.Addresses {
|
||||
for _, port := range ep.Ports {
|
||||
results = append(results, openstack.Endpoint{
|
||||
Address: ip.IP,
|
||||
Port: int(port.Port),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
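For example (illustrative values), an Endpoints object whose single subset lists addresses 10.244.1.5 and 10.244.1.6 on port 8080 flattens into two openstack.Endpoint values, {Address: "10.244.1.5", Port: 8080} and {Address: "10.244.1.6", Port: 8080}, which EnsureLoadBalancer then registers as pool members.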
|
||||
|
||||
// ListKeys implements the interface required by DeltaFIFO to list the keys we
|
||||
// already know about.
|
||||
func (s *serviceCache) ListKeys() []string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
keys := make([]string, 0, len(s.serviceMap))
|
||||
for k := range s.serviceMap {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// GetByKey returns the value stored in the serviceMap under the given key
|
||||
func (s *serviceCache) GetByKey(key string) (interface{}, bool, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if v, ok := s.serviceMap[key]; ok {
|
||||
return v, true, nil
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// allServices returns a snapshot of the services currently held in the cache.
|
||||
func (s *serviceCache) allServices() []*v1.Service {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
services := make([]*v1.Service, 0, len(s.serviceMap))
|
||||
for _, v := range s.serviceMap {
|
||||
services = append(services, v.state)
|
||||
}
|
||||
return services
|
||||
}
|
||||
|
||||
func (s *serviceCache) get(serviceName string) (*cachedService, bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
service, ok := s.serviceMap[serviceName]
|
||||
return service, ok
|
||||
}
|
||||
|
||||
func (s *serviceCache) getOrCreate(serviceName string) *cachedService {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
service, ok := s.serviceMap[serviceName]
|
||||
if !ok {
|
||||
service = &cachedService{}
|
||||
s.serviceMap[serviceName] = service
|
||||
}
|
||||
return service
|
||||
}
|
||||
|
||||
func (s *serviceCache) set(serviceName string, service *cachedService) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.serviceMap[serviceName] = service
|
||||
}
|
||||
|
||||
func (s *serviceCache) delete(serviceName string) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
delete(s.serviceMap, serviceName)
|
||||
}
|
||||
|
||||
func (s *ServiceController) needsUpdateEndpoint(oldEndpoint, newEndpoint *v1.Endpoints) bool {
	if len(oldEndpoint.Subsets) != len(newEndpoint.Subsets) {
		return true
	}

	if !reflect.DeepEqual(oldEndpoint.Subsets, newEndpoint.Subsets) {
		return true
	}

	return false
}

func (s *ServiceController) needsUpdate(oldService *v1.Service, newService *v1.Service) bool {
	if !wantsLoadBalancer(oldService) && !wantsLoadBalancer(newService) {
		return false
	}
	if wantsLoadBalancer(oldService) != wantsLoadBalancer(newService) {
		glog.V(3).Infof("service %q's type changed to LoadBalancer", buildServiceName(newService))
		return true
	}

	if wantsLoadBalancer(newService) && !reflect.DeepEqual(oldService.Spec.LoadBalancerSourceRanges, newService.Spec.LoadBalancerSourceRanges) {
		glog.V(3).Infof("service %q's LoadBalancerSourceRanges changed: %v -> %v", buildServiceName(newService),
			oldService.Spec.LoadBalancerSourceRanges, newService.Spec.LoadBalancerSourceRanges)
		return true
	}

	if !portsEqualForLB(oldService, newService) || oldService.Spec.SessionAffinity != newService.Spec.SessionAffinity {
		return true
	}
	if !loadBalancerIPsAreEqual(oldService, newService) {
		glog.V(3).Infof("service %q's LoadbalancerIP changed: %v -> %v", buildServiceName(newService),
			oldService.Spec.LoadBalancerIP, newService.Spec.LoadBalancerIP)
		return true
	}
	if len(oldService.Spec.ExternalIPs) != len(newService.Spec.ExternalIPs) {
		glog.V(3).Infof("service %q's ExternalIPs changed: %v -> %v", buildServiceName(newService),
			len(oldService.Spec.ExternalIPs), len(newService.Spec.ExternalIPs))
		return true
	}
	for i := range oldService.Spec.ExternalIPs {
		if oldService.Spec.ExternalIPs[i] != newService.Spec.ExternalIPs[i] {
			glog.V(3).Infof("service %q's ExternalIP added: %v", buildServiceName(newService),
				newService.Spec.ExternalIPs[i])
			return true
		}
	}
	if !reflect.DeepEqual(oldService.Annotations, newService.Annotations) {
		return true
	}
	if oldService.UID != newService.UID {
		glog.V(3).Infof("service %q's UID changed: %v -> %v", buildServiceName(newService),
			oldService.UID, newService.UID)
		return true
	}
	if oldService.Spec.ExternalTrafficPolicy != newService.Spec.ExternalTrafficPolicy {
		glog.V(3).Infof("service %q's ExternalTrafficPolicy changed: %v -> %v", buildServiceName(newService),
			oldService.Spec.ExternalTrafficPolicy, newService.Spec.ExternalTrafficPolicy)
		return true
	}

	return false
}
func getPortsForLB(service *v1.Service) ([]*v1.ServicePort, error) {
	var protocol v1.Protocol

	ports := []*v1.ServicePort{}
	for i := range service.Spec.Ports {
		sp := &service.Spec.Ports[i]
		// The check on protocol was removed here. The cloud provider itself is now responsible for all protocol validation
		ports = append(ports, sp)
		if protocol == "" {
			protocol = sp.Protocol
		} else if protocol != sp.Protocol && wantsLoadBalancer(service) {
			// TODO: Convert error messages to use event recorder
			return nil, fmt.Errorf("mixed protocol external load balancers are not supported.")
		}
	}
	return ports, nil
}

func portsEqualForLB(x, y *v1.Service) bool {
	xPorts, err := getPortsForLB(x)
	if err != nil {
		return false
	}
	yPorts, err := getPortsForLB(y)
	if err != nil {
		return false
	}
	return portSlicesEqualForLB(xPorts, yPorts)
}

func portSlicesEqualForLB(x, y []*v1.ServicePort) bool {
	if len(x) != len(y) {
		return false
	}

	for i := range x {
		if !portEqualForLB(x[i], y[i]) {
			return false
		}
	}
	return true
}

func portEqualForLB(x, y *v1.ServicePort) bool {
	// TODO: Should we check name? (In theory, an LB could expose it)
	if x.Name != y.Name {
		return false
	}

	if x.Protocol != y.Protocol {
		return false
	}

	if x.Port != y.Port {
		return false
	}

	// We don't check TargetPort; that is not relevant for load balancing
	// TODO: Should we blank it out? Or just check it anyway?

	return true
}
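A hedged sketch of the comparison rule portEqualForLB encodes: Name, Protocol and Port are significant while TargetPort is deliberately ignored (the port values below are hypothetical):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Two ports that differ only in their target port.
	a := v1.ServicePort{Name: "http", Protocol: v1.ProtocolTCP, Port: 80, TargetPort: intstr.FromInt(8080)}
	b := v1.ServicePort{Name: "http", Protocol: v1.ProtocolTCP, Port: 80, TargetPort: intstr.FromInt(9090)}

	// Same rule as portEqualForLB: Name, Protocol and Port matter, TargetPort does not.
	equalForLB := a.Name == b.Name && a.Protocol == b.Protocol && a.Port == b.Port
	fmt.Println(equalForLB) // true, so this change alone would not trigger an LB update
}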
func wantsLoadBalancer(service *v1.Service) bool {
	return service.Spec.Type == v1.ServiceTypeLoadBalancer
}

func loadBalancerIPsAreEqual(oldService, newService *v1.Service) bool {
	return oldService.Spec.LoadBalancerIP == newService.Spec.LoadBalancerIP
}

// Computes the next retry, using exponential backoff
// mutex must be held.
func (s *cachedService) nextRetryDelay() time.Duration {
	s.lastRetryDelay = s.lastRetryDelay * 2
	if s.lastRetryDelay < minRetryDelay {
		s.lastRetryDelay = minRetryDelay
	}
	if s.lastRetryDelay > maxRetryDelay {
		s.lastRetryDelay = maxRetryDelay
	}
	return s.lastRetryDelay
}

// Resets the retry exponential backoff. mutex must be held.
func (s *cachedService) resetRetryDelay() {
	s.lastRetryDelay = time.Duration(0)
}
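To make the backoff above concrete, a standalone sketch of the delay sequence nextRetryDelay would produce, assuming hypothetical bounds of 5s and 5m for minRetryDelay/maxRetryDelay (the real constants live elsewhere in this package):

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		minRetryDelay = 5 * time.Second // hypothetical lower bound
		maxRetryDelay = 5 * time.Minute // hypothetical upper bound
	)
	var delay time.Duration
	for i := 0; i < 8; i++ {
		delay *= 2
		if delay < minRetryDelay {
			delay = minRetryDelay
		}
		if delay > maxRetryDelay {
			delay = maxRetryDelay
		}
		fmt.Println(delay) // 5s, 10s, 20s, 40s, 1m20s, 2m40s, then capped at 5m0s
	}
}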
// syncService will sync the Service with the given key if it has had its expectations fulfilled,
// meaning it did not expect to see any more of its pods created or deleted. This function is not meant to be
// invoked concurrently with the same key.
func (s *ServiceController) syncService(key string) error {
	startTime := time.Now()
	var cachedService *cachedService
	var retryDelay time.Duration
	defer func() {
		glog.V(4).Infof("Finished syncing service %q (%v)", key, time.Now().Sub(startTime))
	}()

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}

	// service holds the latest service info from apiserver
	service, err := s.serviceInformer.Lister().Services(namespace).Get(name)
	switch {
	case errors.IsNotFound(err):
		// service absence in store means watcher caught the deletion, ensure LB info is cleaned
		glog.Infof("Service has been deleted %v", key)
		err, retryDelay = s.processServiceDeletion(key)
	case err != nil:
		glog.Infof("Unable to retrieve service %v from store: %v", key, err)
		s.workingQueue.Add(key)
		return err
	default:
		cachedService = s.cache.getOrCreate(key)
		err, retryDelay = s.processServiceUpdate(cachedService, service, key)
	}

	if retryDelay != 0 {
		// Add the failed service back to the queue so we'll retry it.
		glog.Errorf("Failed to process service. Retrying in %s: %v", retryDelay, err)
		go func(obj interface{}, delay time.Duration) {
			// Put the service key back on the working queue. More entries for this service may have
			// been added to the queue during the delay, but that is harmless: when the retry is
			// handled, the latest service info is always fetched from the service store.
			s.workingQueue.AddAfter(obj, delay)
		}(key, retryDelay)
	} else if err != nil {
		runtime.HandleError(fmt.Errorf("Failed to process service. Not retrying: %v", err))
	}
	return nil
}

// Returns an error if processing the service deletion failed, along with a time.Duration
// indicating whether processing should be retried; zero means no-retry; otherwise
// we should retry after that Duration.
func (s *ServiceController) processServiceDeletion(key string) (error, time.Duration) {
	cachedService, ok := s.cache.get(key)
	if !ok {
		return fmt.Errorf("Service %s not in cache even though the watcher thought it was. Ignoring the deletion.", key), doNotRetry
	}
	service := cachedService.state
	// delete load balancer info only if the service type is LoadBalancer
	if !wantsLoadBalancer(service) {
		return nil, doNotRetry
	}

	lbName := buildLoadBalancerName(service)
	err := s.osClient.EnsureLoadBalancerDeleted(lbName)
	if err != nil {
		glog.Errorf("Error deleting load balancer (will retry): %v", err)
		return err, cachedService.nextRetryDelay()
	}
	glog.V(3).Infof("Loadbalancer %q deleted", lbName)
	s.cache.delete(key)

	cachedService.resetRetryDelay()
	return nil, doNotRetry
}
@@ -4,6 +4,7 @@ import (
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -67,3 +68,38 @@ func WaitForCRDReady(clientset apiextensionsclient.Interface, crdName string) er
	}
	return nil
}
func LoadBalancerStatusDeepCopy(lb *v1.LoadBalancerStatus) *v1.LoadBalancerStatus {
	c := &v1.LoadBalancerStatus{}
	c.Ingress = make([]v1.LoadBalancerIngress, len(lb.Ingress))
	for i := range lb.Ingress {
		c.Ingress[i] = lb.Ingress[i]
	}
	return c
}

func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool {
	return ingressSliceEqual(l.Ingress, r.Ingress)
}

func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool {
	if len(lhs) != len(rhs) {
		return false
	}
	for i := range lhs {
		if !ingressEqual(&lhs[i], &rhs[i]) {
			return false
		}
	}
	return true
}

func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool {
	if lhs.IP != rhs.IP {
		return false
	}
	if lhs.Hostname != rhs.Hostname {
		return false
	}
	return true
}
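A minimal usage sketch of the two status helpers above, assuming they are exported from stackube's pkg/util package as imported elsewhere in this change; equal statuses let the controller skip the Service status update (the IP is a placeholder):

package main

import (
	"fmt"

	"git.openstack.org/openstack/stackube/pkg/util"
	"k8s.io/api/core/v1"
)

func main() {
	// Previously recorded status and a freshly computed copy of it.
	previous := &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: "203.0.113.10"}}}
	computed := util.LoadBalancerStatusDeepCopy(previous)
	// Equal statuses mean no status patch is needed.
	fmt.Println(util.LoadBalancerStatusEqual(previous, computed)) // true
}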
@@ -2,6 +2,7 @@ package util

import (
	"errors"

	apiv1 "k8s.io/api/core/v1"
)

@@ -19,7 +20,8 @@ var ErrMultipleResults = errors.New("MultipleResults")

func BuildNetworkName(namespace, name string) string {
	if IsSystemNamespace(namespace) {
		namespace = SystemTenant
		// All system namespaces share the same network.
		return namePrefix + "-" + SystemTenant + "-" + SystemTenant
	}
	return namePrefix + "-" + namespace + "-" + name
}
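A small sketch of the behaviour this fix gives BuildNetworkName, assuming IsSystemNamespace treats kube-system as a system namespace (the printed prefix depends on the unexported namePrefix constant, which is not shown here):

package main

import (
	"fmt"

	"git.openstack.org/openstack/stackube/pkg/util"
)

func main() {
	// Every system namespace collapses onto the single shared system network,
	// regardless of which network name is asked for.
	a := util.BuildNetworkName("kube-system", "dns")
	b := util.BuildNetworkName("kube-system", "anything-else")
	fmt.Println(a == b) // true

	// User namespaces keep their own per-network names, e.g. "<namePrefix>-default-web".
	fmt.Println(util.BuildNetworkName("default", "web"))
}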
146  vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go  (generated, vendored, new file)
@@ -0,0 +1,146 @@
|
||||
package floatingips
|
||||
|
||||
import (
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/pagination"
|
||||
)
|
||||
|
||||
// ListOpts allows the filtering and sorting of paginated collections through
|
||||
// the API. Filtering is achieved by passing in struct field values that map to
|
||||
// the floating IP attributes you want to see returned. SortKey allows you to
|
||||
// sort by a particular network attribute. SortDir sets the direction, and is
|
||||
// either `asc' or `desc'. Marker and Limit are used for pagination.
|
||||
type ListOpts struct {
|
||||
ID string `q:"id"`
|
||||
FloatingNetworkID string `q:"floating_network_id"`
|
||||
PortID string `q:"port_id"`
|
||||
FixedIP string `q:"fixed_ip_address"`
|
||||
FloatingIP string `q:"floating_ip_address"`
|
||||
TenantID string `q:"tenant_id"`
|
||||
Limit int `q:"limit"`
|
||||
Marker string `q:"marker"`
|
||||
SortKey string `q:"sort_key"`
|
||||
SortDir string `q:"sort_dir"`
|
||||
RouterID string `q:"router_id"`
|
||||
}
|
||||
|
||||
// List returns a Pager which allows you to iterate over a collection of
|
||||
// floating IP resources. It accepts a ListOpts struct, which allows you to
|
||||
// filter and sort the returned collection for greater efficiency.
|
||||
func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
|
||||
q, err := gophercloud.BuildQueryString(&opts)
|
||||
if err != nil {
|
||||
return pagination.Pager{Err: err}
|
||||
}
|
||||
u := rootURL(c) + q.String()
|
||||
return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
|
||||
return FloatingIPPage{pagination.LinkedPageBase{PageResult: r}}
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOptsBuilder is the interface that types must satisfy to be used as Create
|
||||
// options.
|
||||
type CreateOptsBuilder interface {
|
||||
ToFloatingIPCreateMap() (map[string]interface{}, error)
|
||||
}
|
||||
|
||||
// CreateOpts contains all the values needed to create a new floating IP
|
||||
// resource. The only required fields are FloatingNetworkID and PortID which
|
||||
// refer to the external network and internal port respectively.
|
||||
type CreateOpts struct {
|
||||
FloatingNetworkID string `json:"floating_network_id" required:"true"`
|
||||
FloatingIP string `json:"floating_ip_address,omitempty"`
|
||||
PortID string `json:"port_id,omitempty"`
|
||||
FixedIP string `json:"fixed_ip_address,omitempty"`
|
||||
TenantID string `json:"tenant_id,omitempty"`
|
||||
}
|
||||
|
||||
// ToFloatingIPCreateMap allows CreateOpts to satisfy the CreateOptsBuilder
|
||||
// interface
|
||||
func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) {
|
||||
return gophercloud.BuildRequestBody(opts, "floatingip")
|
||||
}
|
||||
|
||||
// Create accepts a CreateOpts struct and uses the values provided to create a
|
||||
// new floating IP resource. You can create floating IPs on external networks
|
||||
// only. If you provide a FloatingNetworkID which refers to a network that is
|
||||
// not external (i.e. its `router:external' attribute is False), the operation
|
||||
// will fail and return a 400 error.
|
||||
//
|
||||
// If you do not specify a FloatingIP address value, the operation will
|
||||
// automatically allocate an available address for the new resource. If you do
|
||||
// choose to specify one, it must fall within the subnet range for the external
|
||||
// network - otherwise the operation returns a 400 error. If the FloatingIP
|
||||
// address is already in use, the operation returns a 409 error code.
|
||||
//
|
||||
// You can associate the new resource with an internal port by using the PortID
|
||||
// field. If you specify a PortID that is not valid, the operation will fail and
|
||||
// return 404 error code.
|
||||
//
|
||||
// You must also configure an IP address for the port associated with the PortID
|
||||
// you have provided - this is what the FixedIP refers to: an IP fixed to a port.
|
||||
// Because a port might be associated with multiple IP addresses, you can use
|
||||
// the FixedIP field to associate a particular IP address rather than have the
|
||||
// API assume for you. If you specify an IP address that is not valid, the
|
||||
// operation will fail and return a 400 error code. If the PortID and FixedIP
|
||||
// are already associated with another resource, the operation will fail and
|
||||
// returns a 409 error code.
|
||||
func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
|
||||
b, err := opts.ToFloatingIPCreateMap()
|
||||
if err != nil {
|
||||
r.Err = err
|
||||
return
|
||||
}
|
||||
_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Get retrieves a particular floating IP resource based on its unique ID.
|
||||
func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
|
||||
_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateOptsBuilder is the interface that types must satisfy to be used as Update
|
||||
// options.
|
||||
type UpdateOptsBuilder interface {
|
||||
ToFloatingIPUpdateMap() (map[string]interface{}, error)
|
||||
}
|
||||
|
||||
// UpdateOpts contains the values used when updating a floating IP resource. The
|
||||
// only value that can be updated is which internal port the floating IP is
|
||||
// linked to. To associate the floating IP with a new internal port, provide its
|
||||
// ID. To disassociate the floating IP from all ports, provide an empty string.
|
||||
type UpdateOpts struct {
|
||||
PortID *string `json:"port_id"`
|
||||
}
|
||||
|
||||
// ToFloatingIPUpdateMap allows UpdateOpts to satisfy the UpdateOptsBuilder
|
||||
// interface
|
||||
func (opts UpdateOpts) ToFloatingIPUpdateMap() (map[string]interface{}, error) {
|
||||
return gophercloud.BuildRequestBody(opts, "floatingip")
|
||||
}
|
||||
|
||||
// Update allows floating IP resources to be updated. Currently, the only way to
|
||||
// "update" a floating IP is to associate it with a new internal port, or
|
||||
// disassociate it from all ports. See UpdateOpts for instructions on how to
|
||||
// do this.
|
||||
func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
|
||||
b, err := opts.ToFloatingIPUpdateMap()
|
||||
if err != nil {
|
||||
r.Err = err
|
||||
return
|
||||
}
|
||||
_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
|
||||
OkCodes: []int{200},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Delete will permanently delete a particular floating IP resource. Please
|
||||
// ensure this is what you want - you can also disassociate the IP from existing
|
||||
// internal ports.
|
||||
func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
|
||||
_, r.Err = c.Delete(resourceURL(c, id), nil)
|
||||
return
|
||||
}
|
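A hedged usage sketch of the vendored floating IP API above; the network and port UUIDs are placeholders:

package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
)

// allocateFloatingIP allocates a floating IP on an external network and binds it
// to an internal port.
func allocateFloatingIP(network *gophercloud.ServiceClient) error {
	fip, err := floatingips.Create(network, floatingips.CreateOpts{
		FloatingNetworkID: "EXTERNAL-NET-UUID",  // placeholder
		PortID:            "INTERNAL-PORT-UUID", // placeholder
	}).Extract()
	if err != nil {
		return err
	}
	fmt.Println("allocated floating IP", fip.FloatingIP)
	return nil
}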
110  vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go  (generated, vendored, new file)
@@ -0,0 +1,110 @@
|
||||
package floatingips
|
||||
|
||||
import (
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/pagination"
|
||||
)
|
||||
|
||||
// FloatingIP represents a floating IP resource. A floating IP is an external
|
||||
// IP address that is mapped to an internal port and, optionally, a specific
|
||||
// IP address on a private network. In other words, it enables access to an
|
||||
// instance on a private network from an external network. For this reason,
|
||||
// floating IPs can only be defined on networks where the `router:external'
|
||||
// attribute (provided by the external network extension) is set to True.
|
||||
type FloatingIP struct {
|
||||
// Unique identifier for the floating IP instance.
|
||||
ID string `json:"id"`
|
||||
|
||||
// UUID of the external network where the floating IP is to be created.
|
||||
FloatingNetworkID string `json:"floating_network_id"`
|
||||
|
||||
// Address of the floating IP on the external network.
|
||||
FloatingIP string `json:"floating_ip_address"`
|
||||
|
||||
// UUID of the port on an internal network that is associated with the floating IP.
|
||||
PortID string `json:"port_id"`
|
||||
|
||||
// The specific IP address of the internal port which should be associated
|
||||
// with the floating IP.
|
||||
FixedIP string `json:"fixed_ip_address"`
|
||||
|
||||
// Owner of the floating IP. Only admin users can specify a tenant identifier
|
||||
// other than its own.
|
||||
TenantID string `json:"tenant_id"`
|
||||
|
||||
// The condition of the API resource.
|
||||
Status string `json:"status"`
|
||||
|
||||
//The ID of the router used for this Floating-IP
|
||||
RouterID string `json:"router_id"`
|
||||
}
|
||||
|
||||
type commonResult struct {
|
||||
gophercloud.Result
|
||||
}
|
||||
|
||||
// Extract accepts a result and extracts a FloatingIP resource.
|
||||
func (r commonResult) Extract() (*FloatingIP, error) {
|
||||
var s struct {
|
||||
FloatingIP *FloatingIP `json:"floatingip"`
|
||||
}
|
||||
err := r.ExtractInto(&s)
|
||||
return s.FloatingIP, err
|
||||
}
|
||||
|
||||
// CreateResult represents the result of a create operation.
|
||||
type CreateResult struct {
|
||||
commonResult
|
||||
}
|
||||
|
||||
// GetResult represents the result of a get operation.
|
||||
type GetResult struct {
|
||||
commonResult
|
||||
}
|
||||
|
||||
// UpdateResult represents the result of an update operation.
|
||||
type UpdateResult struct {
|
||||
commonResult
|
||||
}
|
||||
|
||||
// DeleteResult represents the result of a delete operation.
|
||||
type DeleteResult struct {
|
||||
gophercloud.ErrResult
|
||||
}
|
||||
|
||||
// FloatingIPPage is the page returned by a pager when traversing over a
|
||||
// collection of floating IPs.
|
||||
type FloatingIPPage struct {
|
||||
pagination.LinkedPageBase
|
||||
}
|
||||
|
||||
// NextPageURL is invoked when a paginated collection of floating IPs has reached
|
||||
// the end of a page and the pager seeks to traverse over a new one. In order
|
||||
// to do this, it needs to construct the next page's URL.
|
||||
func (r FloatingIPPage) NextPageURL() (string, error) {
|
||||
var s struct {
|
||||
Links []gophercloud.Link `json:"floatingips_links"`
|
||||
}
|
||||
err := r.ExtractInto(&s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return gophercloud.ExtractNextURL(s.Links)
|
||||
}
|
||||
|
||||
// IsEmpty checks whether a FloatingIPPage struct is empty.
|
||||
func (r FloatingIPPage) IsEmpty() (bool, error) {
|
||||
is, err := ExtractFloatingIPs(r)
|
||||
return len(is) == 0, err
|
||||
}
|
||||
|
||||
// ExtractFloatingIPs accepts a Page struct, specifically a FloatingIPPage struct,
|
||||
// and extracts the elements into a slice of FloatingIP structs. In other words,
|
||||
// a generic collection is mapped into a relevant slice.
|
||||
func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) {
|
||||
var s struct {
|
||||
FloatingIPs []FloatingIP `json:"floatingips"`
|
||||
}
|
||||
err := (r.(FloatingIPPage)).ExtractInto(&s)
|
||||
return s.FloatingIPs, err
|
||||
}
|
13  vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go  (generated, vendored, new file)
@@ -0,0 +1,13 @@
|
||||
package floatingips
|
||||
|
||||
import "github.com/gophercloud/gophercloud"
|
||||
|
||||
const resourcePath = "floatingips"
|
||||
|
||||
func rootURL(c *gophercloud.ServiceClient) string {
|
||||
return c.ServiceURL(resourcePath)
|
||||
}
|
||||
|
||||
func resourceURL(c *gophercloud.ServiceClient, id string) string {
|
||||
return c.ServiceURL(resourcePath, id)
|
||||
}
|
182  vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go  (generated, vendored, new file)
@@ -0,0 +1,182 @@
|
||||
package listeners
|
||||
|
||||
import (
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/pagination"
|
||||
)
|
||||
|
||||
type Protocol string
|
||||
|
||||
// Supported attributes for create/update operations.
|
||||
const (
|
||||
ProtocolTCP Protocol = "TCP"
|
||||
ProtocolHTTP Protocol = "HTTP"
|
||||
ProtocolHTTPS Protocol = "HTTPS"
|
||||
)
|
||||
|
||||
// ListOptsBuilder allows extensions to add additional parameters to the
|
||||
// List request.
|
||||
type ListOptsBuilder interface {
|
||||
ToListenerListQuery() (string, error)
|
||||
}
|
||||
|
||||
// ListOpts allows the filtering and sorting of paginated collections through
|
||||
// the API. Filtering is achieved by passing in struct field values that map to
|
||||
// the floating IP attributes you want to see returned. SortKey allows you to
|
||||
// sort by a particular listener attribute. SortDir sets the direction, and is
|
||||
// either `asc' or `desc'. Marker and Limit are used for pagination.
|
||||
type ListOpts struct {
|
||||
ID string `q:"id"`
|
||||
Name string `q:"name"`
|
||||
AdminStateUp *bool `q:"admin_state_up"`
|
||||
TenantID string `q:"tenant_id"`
|
||||
LoadbalancerID string `q:"loadbalancer_id"`
|
||||
DefaultPoolID string `q:"default_pool_id"`
|
||||
Protocol string `q:"protocol"`
|
||||
ProtocolPort int `q:"protocol_port"`
|
||||
ConnectionLimit int `q:"connection_limit"`
|
||||
Limit int `q:"limit"`
|
||||
Marker string `q:"marker"`
|
||||
SortKey string `q:"sort_key"`
|
||||
SortDir string `q:"sort_dir"`
|
||||
}
|
||||
|
||||
// ToListenerListQuery formats a ListOpts into a query string.
|
||||
func (opts ListOpts) ToListenerListQuery() (string, error) {
|
||||
q, err := gophercloud.BuildQueryString(opts)
|
||||
return q.String(), err
|
||||
}
|
||||
|
||||
// List returns a Pager which allows you to iterate over a collection of
|
||||
// listeners. It accepts a ListOpts struct, which allows you to filter and sort
|
||||
// the returned collection for greater efficiency.
|
||||
//
|
||||
// Default policy settings return only those routers that are owned by the
|
||||
// tenant who submits the request, unless an admin user submits the request.
|
||||
func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
|
||||
url := rootURL(c)
|
||||
if opts != nil {
|
||||
query, err := opts.ToListenerListQuery()
|
||||
if err != nil {
|
||||
return pagination.Pager{Err: err}
|
||||
}
|
||||
url += query
|
||||
}
|
||||
return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
|
||||
return ListenerPage{pagination.LinkedPageBase{PageResult: r}}
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOptsBuilder is the interface options structs have to satisfy in order
|
||||
// to be used in the main Create operation in this package. Since many
|
||||
// extensions decorate or modify the common logic, it is useful for them to
|
||||
// satisfy a basic interface in order for them to be used.
|
||||
type CreateOptsBuilder interface {
|
||||
ToListenerCreateMap() (map[string]interface{}, error)
|
||||
}
|
||||
|
||||
// CreateOpts is the common options struct used in this package's Create
|
||||
// operation.
|
||||
type CreateOpts struct {
|
||||
// The load balancer on which to provision this listener.
|
||||
LoadbalancerID string `json:"loadbalancer_id" required:"true"`
|
||||
// The protocol - can either be TCP, HTTP or HTTPS.
|
||||
Protocol Protocol `json:"protocol" required:"true"`
|
||||
// The port on which to listen for client traffic.
|
||||
ProtocolPort int `json:"protocol_port" required:"true"`
|
||||
// Indicates the owner of the Listener. Required for admins.
|
||||
TenantID string `json:"tenant_id,omitempty"`
|
||||
// Human-readable name for the Listener. Does not have to be unique.
|
||||
Name string `json:"name,omitempty"`
|
||||
// The ID of the default pool with which the Listener is associated.
|
||||
DefaultPoolID string `json:"default_pool_id,omitempty"`
|
||||
// Human-readable description for the Listener.
|
||||
Description string `json:"description,omitempty"`
|
||||
// The maximum number of connections allowed for the Listener.
|
||||
ConnLimit *int `json:"connection_limit,omitempty"`
|
||||
// A reference to a container of TLS secrets.
|
||||
DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"`
|
||||
// A list of references to TLS secrets.
|
||||
SniContainerRefs []string `json:"sni_container_refs,omitempty"`
|
||||
// The administrative state of the Listener. A valid value is true (UP)
|
||||
// or false (DOWN).
|
||||
AdminStateUp *bool `json:"admin_state_up,omitempty"`
|
||||
}
|
||||
|
||||
// ToListenerCreateMap casts a CreateOpts struct to a map.
|
||||
func (opts CreateOpts) ToListenerCreateMap() (map[string]interface{}, error) {
|
||||
return gophercloud.BuildRequestBody(opts, "listener")
|
||||
}
|
||||
|
||||
// Create is an operation which provisions a new Listeners based on the
|
||||
// configuration defined in the CreateOpts struct. Once the request is
|
||||
// validated and progress has started on the provisioning process, a
|
||||
// CreateResult will be returned.
|
||||
//
|
||||
// Users with an admin role can create Listeners on behalf of other tenants by
|
||||
// specifying a TenantID attribute different than their own.
|
||||
func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
|
||||
b, err := opts.ToListenerCreateMap()
|
||||
if err != nil {
|
||||
r.Err = err
|
||||
return
|
||||
}
|
||||
_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Get retrieves a particular Listeners based on its unique ID.
|
||||
func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
|
||||
_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateOptsBuilder is the interface options structs have to satisfy in order
|
||||
// to be used in the main Update operation in this package. Since many
|
||||
// extensions decorate or modify the common logic, it is useful for them to
|
||||
// satisfy a basic interface in order for them to be used.
|
||||
type UpdateOptsBuilder interface {
|
||||
ToListenerUpdateMap() (map[string]interface{}, error)
|
||||
}
|
||||
|
||||
// UpdateOpts is the common options struct used in this package's Update
|
||||
// operation.
|
||||
type UpdateOpts struct {
|
||||
// Human-readable name for the Listener. Does not have to be unique.
|
||||
Name string `json:"name,omitempty"`
|
||||
// Human-readable description for the Listener.
|
||||
Description string `json:"description,omitempty"`
|
||||
// The maximum number of connections allowed for the Listener.
|
||||
ConnLimit *int `json:"connection_limit,omitempty"`
|
||||
// A reference to a container of TLS secrets.
|
||||
DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"`
|
||||
// A list of references to TLS secrets.
|
||||
SniContainerRefs []string `json:"sni_container_refs,omitempty"`
|
||||
// The administrative state of the Listener. A valid value is true (UP)
|
||||
// or false (DOWN).
|
||||
AdminStateUp *bool `json:"admin_state_up,omitempty"`
|
||||
}
|
||||
|
||||
// ToListenerUpdateMap casts a UpdateOpts struct to a map.
|
||||
func (opts UpdateOpts) ToListenerUpdateMap() (map[string]interface{}, error) {
|
||||
return gophercloud.BuildRequestBody(opts, "listener")
|
||||
}
|
||||
|
||||
// Update is an operation which modifies the attributes of the specified Listener.
|
||||
func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) {
|
||||
b, err := opts.ToListenerUpdateMap()
|
||||
if err != nil {
|
||||
r.Err = err
|
||||
return
|
||||
}
|
||||
_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
|
||||
OkCodes: []int{200, 202},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Delete will permanently delete a particular Listeners based on its unique ID.
|
||||
func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
|
||||
_, r.Err = c.Delete(resourceURL(c, id), nil)
|
||||
return
|
||||
}
|
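A hedged sketch of paging through the listeners of one load balancer with the List/ExtractListeners helpers defined in this vendored package (EachPage is the standard gophercloud pagination helper):

package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
	"github.com/gophercloud/gophercloud/pagination"
)

// listenersOf walks every page of listeners attached to the given load balancer.
func listenersOf(network *gophercloud.ServiceClient, lbID string) ([]listeners.Listener, error) {
	var all []listeners.Listener
	err := listeners.List(network, listeners.ListOpts{LoadbalancerID: lbID}).EachPage(
		func(page pagination.Page) (bool, error) {
			ls, err := listeners.ExtractListeners(page)
			if err != nil {
				return false, err
			}
			all = append(all, ls...)
			return true, nil
		})
	return all, err
}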
114  vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go  (generated, vendored, new file)
@@ -0,0 +1,114 @@
|
||||
package listeners
|
||||
|
||||
import (
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
|
||||
"github.com/gophercloud/gophercloud/pagination"
|
||||
)
|
||||
|
||||
type LoadBalancerID struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
|
||||
// Listener is the primary load balancing configuration object that specifies
|
||||
// the loadbalancer and port on which client traffic is received, as well
|
||||
// as other details such as the load balancing method to be use, protocol, etc.
|
||||
type Listener struct {
|
||||
// The unique ID for the Listener.
|
||||
ID string `json:"id"`
|
||||
// Owner of the Listener. Only an admin user can specify a tenant ID other than its own.
|
||||
TenantID string `json:"tenant_id"`
|
||||
// Human-readable name for the Listener. Does not have to be unique.
|
||||
Name string `json:"name"`
|
||||
// Human-readable description for the Listener.
|
||||
Description string `json:"description"`
|
||||
// The protocol to loadbalance. A valid value is TCP, HTTP, or HTTPS.
|
||||
Protocol string `json:"protocol"`
|
||||
// The port on which to listen to client traffic that is associated with the
|
||||
// Loadbalancer. A valid value is from 0 to 65535.
|
||||
ProtocolPort int `json:"protocol_port"`
|
||||
// The UUID of default pool. Must have compatible protocol with listener.
|
||||
DefaultPoolID string `json:"default_pool_id"`
|
||||
// A list of load balancer IDs.
|
||||
Loadbalancers []LoadBalancerID `json:"loadbalancers"`
|
||||
// The maximum number of connections allowed for the Loadbalancer. Default is -1,
|
||||
// meaning no limit.
|
||||
ConnLimit int `json:"connection_limit"`
|
||||
// The list of references to TLS secrets.
|
||||
SniContainerRefs []string `json:"sni_container_refs"`
|
||||
// Optional. A reference to a container of TLS secrets.
|
||||
DefaultTlsContainerRef string `json:"default_tls_container_ref"`
|
||||
// The administrative state of the Listener. A valid value is true (UP) or false (DOWN).
|
||||
AdminStateUp bool `json:"admin_state_up"`
|
||||
Pools []pools.Pool `json:"pools"`
|
||||
}
|
||||
|
||||
// ListenerPage is the page returned by a pager when traversing over a
|
||||
// collection of listeners.
|
||||
type ListenerPage struct {
|
||||
pagination.LinkedPageBase
|
||||
}
|
||||
|
||||
// NextPageURL is invoked when a paginated collection of routers has reached
|
||||
// the end of a page and the pager seeks to traverse over a new one. In order
|
||||
// to do this, it needs to construct the next page's URL.
|
||||
func (r ListenerPage) NextPageURL() (string, error) {
|
||||
var s struct {
|
||||
Links []gophercloud.Link `json:"listeners_links"`
|
||||
}
|
||||
err := r.ExtractInto(&s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return gophercloud.ExtractNextURL(s.Links)
|
||||
}
|
||||
|
||||
// IsEmpty checks whether a ListenerPage struct is empty.
|
||||
func (r ListenerPage) IsEmpty() (bool, error) {
|
||||
is, err := ExtractListeners(r)
|
||||
return len(is) == 0, err
|
||||
}
|
||||
|
||||
// ExtractListeners accepts a Page struct, specifically a ListenerPage struct,
|
||||
// and extracts the elements into a slice of Listener structs. In other words,
|
||||
// a generic collection is mapped into a relevant slice.
|
||||
func ExtractListeners(r pagination.Page) ([]Listener, error) {
|
||||
var s struct {
|
||||
Listeners []Listener `json:"listeners"`
|
||||
}
|
||||
err := (r.(ListenerPage)).ExtractInto(&s)
|
||||
return s.Listeners, err
|
||||
}
|
||||
|
||||
type commonResult struct {
|
||||
gophercloud.Result
|
||||
}
|
||||
|
||||
// Extract is a function that accepts a result and extracts a Listener.
|
||||
func (r commonResult) Extract() (*Listener, error) {
|
||||
var s struct {
|
||||
Listener *Listener `json:"listener"`
|
||||
}
|
||||
err := r.ExtractInto(&s)
|
||||
return s.Listener, err
|
||||
}
|
||||
|
||||
// CreateResult represents the result of a create operation.
|
||||
type CreateResult struct {
|
||||
commonResult
|
||||
}
|
||||
|
||||
// GetResult represents the result of a get operation.
|
||||
type GetResult struct {
|
||||
commonResult
|
||||
}
|
||||
|
||||
// UpdateResult represents the result of an update operation.
|
||||
type UpdateResult struct {
|
||||
commonResult
|
||||
}
|
||||
|
||||
// DeleteResult represents the result of a delete operation.
|
||||
type DeleteResult struct {
|
||||
gophercloud.ErrResult
|
||||
}
|
16  vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go  (generated, vendored, new file)
@@ -0,0 +1,16 @@
|
||||
package listeners
|
||||
|
||||
import "github.com/gophercloud/gophercloud"
|
||||
|
||||
const (
|
||||
rootPath = "lbaas"
|
||||
resourcePath = "listeners"
|
||||
)
|
||||
|
||||
func rootURL(c *gophercloud.ServiceClient) string {
|
||||
return c.ServiceURL(rootPath, resourcePath)
|
||||
}
|
||||
|
||||
func resourceURL(c *gophercloud.ServiceClient, id string) string {
|
||||
return c.ServiceURL(rootPath, resourcePath, id)
|
||||
}
|
172  vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go  (generated, vendored, new file)
@@ -0,0 +1,172 @@
|
||||
package loadbalancers
|
||||
|
||||
import (
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/pagination"
|
||||
)
|
||||
|
||||
// ListOptsBuilder allows extensions to add additional parameters to the
|
||||
// List request.
|
||||
type ListOptsBuilder interface {
|
||||
ToLoadBalancerListQuery() (string, error)
|
||||
}
|
||||
|
||||
// ListOpts allows the filtering and sorting of paginated collections through
|
||||
// the API. Filtering is achieved by passing in struct field values that map to
|
||||
// the Loadbalancer attributes you want to see returned. SortKey allows you to
|
||||
// sort by a particular attribute. SortDir sets the direction, and is
|
||||
// either `asc' or `desc'. Marker and Limit are used for pagination.
|
||||
type ListOpts struct {
|
||||
Description string `q:"description"`
|
||||
AdminStateUp *bool `q:"admin_state_up"`
|
||||
TenantID string `q:"tenant_id"`
|
||||
ProvisioningStatus string `q:"provisioning_status"`
|
||||
VipAddress string `q:"vip_address"`
|
||||
VipPortID string `q:"vip_port_id"`
|
||||
VipSubnetID string `q:"vip_subnet_id"`
|
||||
ID string `q:"id"`
|
||||
OperatingStatus string `q:"operating_status"`
|
||||
Name string `q:"name"`
|
||||
Flavor string `q:"flavor"`
|
||||
Provider string `q:"provider"`
|
||||
Limit int `q:"limit"`
|
||||
Marker string `q:"marker"`
|
||||
SortKey string `q:"sort_key"`
|
||||
SortDir string `q:"sort_dir"`
|
||||
}
|
||||
|
||||
// ToLoadbalancerListQuery formats a ListOpts into a query string.
|
||||
func (opts ListOpts) ToLoadBalancerListQuery() (string, error) {
|
||||
q, err := gophercloud.BuildQueryString(opts)
|
||||
return q.String(), err
|
||||
}
|
||||
|
||||
// List returns a Pager which allows you to iterate over a collection of
|
||||
// load balancers. It accepts a ListOpts struct, which allows you to filter and sort
|
||||
// the returned collection for greater efficiency.
|
||||
//
|
||||
// Default policy settings return only those routers that are owned by the
|
||||
// tenant who submits the request, unless an admin user submits the request.
|
||||
func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
|
||||
url := rootURL(c)
|
||||
if opts != nil {
|
||||
query, err := opts.ToLoadBalancerListQuery()
|
||||
if err != nil {
|
||||
return pagination.Pager{Err: err}
|
||||
}
|
||||
url += query
|
||||
}
|
||||
return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
|
||||
return LoadBalancerPage{pagination.LinkedPageBase{PageResult: r}}
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOptsBuilder is the interface options structs have to satisfy in order
|
||||
// to be used in the main Create operation in this package. Since many
|
||||
// extensions decorate or modify the common logic, it is useful for them to
|
||||
// satisfy a basic interface in order for them to be used.
|
||||
type CreateOptsBuilder interface {
|
||||
ToLoadBalancerCreateMap() (map[string]interface{}, error)
|
||||
}
|
||||
|
||||
// CreateOpts is the common options struct used in this package's Create
|
||||
// operation.
|
||||
type CreateOpts struct {
|
||||
// Optional. Human-readable name for the Loadbalancer. Does not have to be unique.
|
||||
Name string `json:"name,omitempty"`
|
||||
// Optional. Human-readable description for the Loadbalancer.
|
||||
Description string `json:"description,omitempty"`
|
||||
// Required. The network on which to allocate the Loadbalancer's address. A tenant can
|
||||
// only create Loadbalancers on networks authorized by policy (e.g. networks that
|
||||
// belong to them or networks that are shared).
|
||||
VipSubnetID string `json:"vip_subnet_id" required:"true"`
|
||||
// Required for admins. The UUID of the tenant who owns the Loadbalancer.
|
||||
// Only administrative users can specify a tenant UUID other than their own.
|
||||
TenantID string `json:"tenant_id,omitempty"`
|
||||
// Optional. The IP address of the Loadbalancer.
|
||||
VipAddress string `json:"vip_address,omitempty"`
|
||||
// Optional. The administrative state of the Loadbalancer. A valid value is true (UP)
|
||||
// or false (DOWN).
|
||||
AdminStateUp *bool `json:"admin_state_up,omitempty"`
|
||||
// Optional. The UUID of a flavor.
|
||||
Flavor string `json:"flavor,omitempty"`
|
||||
// Optional. The name of the provider.
|
||||
Provider string `json:"provider,omitempty"`
|
||||
}
|
||||
|
||||
// ToLoadBalancerCreateMap casts a CreateOpts struct to a map.
|
||||
func (opts CreateOpts) ToLoadBalancerCreateMap() (map[string]interface{}, error) {
|
||||
return gophercloud.BuildRequestBody(opts, "loadbalancer")
|
||||
}
|
||||
|
||||
// Create is an operation which provisions a new loadbalancer based on the
|
||||
// configuration defined in the CreateOpts struct. Once the request is
|
||||
// validated and progress has started on the provisioning process, a
|
||||
// CreateResult will be returned.
|
||||
//
|
||||
// Users with an admin role can create loadbalancers on behalf of other tenants by
|
||||
// specifying a TenantID attribute different than their own.
|
||||
func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
|
||||
b, err := opts.ToLoadBalancerCreateMap()
|
||||
if err != nil {
|
||||
r.Err = err
|
||||
return
|
||||
}
|
||||
_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Get retrieves a particular Loadbalancer based on its unique ID.
|
||||
func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
|
||||
_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateOptsBuilder is the interface options structs have to satisfy in order
|
||||
// to be used in the main Update operation in this package. Since many
|
||||
// extensions decorate or modify the common logic, it is useful for them to
|
||||
// satisfy a basic interface in order for them to be used.
|
||||
type UpdateOptsBuilder interface {
|
||||
ToLoadBalancerUpdateMap() (map[string]interface{}, error)
|
||||
}
|
||||
|
||||
// UpdateOpts is the common options struct used in this package's Update
|
||||
// operation.
|
||||
type UpdateOpts struct {
|
||||
// Optional. Human-readable name for the Loadbalancer. Does not have to be unique.
|
||||
Name string `json:"name,omitempty"`
|
||||
// Optional. Human-readable description for the Loadbalancer.
|
||||
Description string `json:"description,omitempty"`
|
||||
// Optional. The administrative state of the Loadbalancer. A valid value is true (UP)
|
||||
// or false (DOWN).
|
||||
AdminStateUp *bool `json:"admin_state_up,omitempty"`
|
||||
}
|
||||
|
||||
// ToLoadBalancerUpdateMap casts a UpdateOpts struct to a map.
|
||||
func (opts UpdateOpts) ToLoadBalancerUpdateMap() (map[string]interface{}, error) {
|
||||
return gophercloud.BuildRequestBody(opts, "loadbalancer")
|
||||
}
|
||||
|
||||
// Update is an operation which modifies the attributes of the specified LoadBalancer.
|
||||
func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) {
|
||||
b, err := opts.ToLoadBalancerUpdateMap()
|
||||
if err != nil {
|
||||
r.Err = err
|
||||
return
|
||||
}
|
||||
_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
|
||||
OkCodes: []int{200, 202},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Delete will permanently delete a particular LoadBalancer based on its unique ID.
|
||||
func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
|
||||
_, r.Err = c.Delete(resourceURL(c, id), nil)
|
||||
return
|
||||
}
|
||||
|
||||
func GetStatuses(c *gophercloud.ServiceClient, id string) (r GetStatusesResult) {
|
||||
_, r.Err = c.Get(statusRootURL(c, id), &r.Body, nil)
|
||||
return
|
||||
}
|
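A hedged sketch of creating a load balancer with the vendored API above and polling until its provisioning status reaches ACTIVE; the subnet ID, retry count and interval are placeholders:

package example

import (
	"fmt"
	"time"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
)

// createLB creates a load balancer on the given VIP subnet and waits for Neutron
// to report it ACTIVE.
func createLB(network *gophercloud.ServiceClient, name, subnetID string) (*loadbalancers.LoadBalancer, error) {
	lb, err := loadbalancers.Create(network, loadbalancers.CreateOpts{
		Name:        name,
		VipSubnetID: subnetID,
	}).Extract()
	if err != nil {
		return nil, err
	}
	for i := 0; i < 30; i++ {
		lb, err = loadbalancers.Get(network, lb.ID).Extract()
		if err != nil {
			return nil, err
		}
		if lb.ProvisioningStatus == "ACTIVE" {
			return lb, nil
		}
		time.Sleep(2 * time.Second)
	}
	return nil, fmt.Errorf("load balancer %s did not become ACTIVE", lb.ID)
}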
125  vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go  (generated, vendored, new file)
@@ -0,0 +1,125 @@
|
||||
package loadbalancers
|
||||
|
||||
import (
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
|
||||
"github.com/gophercloud/gophercloud/pagination"
|
||||
)
|
||||
|
||||
// LoadBalancer is the primary load balancing configuration object that specifies
|
||||
// the virtual IP address on which client traffic is received, as well
|
||||
// as other details such as the load balancing method to be use, protocol, etc.
|
||||
type LoadBalancer struct {
|
||||
// Human-readable description for the Loadbalancer.
|
||||
Description string `json:"description"`
|
||||
// The administrative state of the Loadbalancer. A valid value is true (UP) or false (DOWN).
|
||||
AdminStateUp bool `json:"admin_state_up"`
|
||||
// Owner of the LoadBalancer. Only an admin user can specify a tenant ID other than its own.
|
||||
TenantID string `json:"tenant_id"`
|
||||
// The provisioning status of the LoadBalancer. This value is ACTIVE, PENDING_CREATE or ERROR.
|
||||
ProvisioningStatus string `json:"provisioning_status"`
|
||||
// The IP address of the Loadbalancer.
|
||||
VipAddress string `json:"vip_address"`
|
||||
// The UUID of the port associated with the IP address.
|
||||
VipPortID string `json:"vip_port_id"`
|
||||
// The UUID of the subnet on which to allocate the virtual IP for the Loadbalancer address.
|
||||
VipSubnetID string `json:"vip_subnet_id"`
|
||||
// The unique ID for the LoadBalancer.
|
||||
ID string `json:"id"`
|
||||
// The operating status of the LoadBalancer. This value is ONLINE or OFFLINE.
|
||||
OperatingStatus string `json:"operating_status"`
|
||||
// Human-readable name for the LoadBalancer. Does not have to be unique.
|
||||
Name string `json:"name"`
|
||||
// The UUID of a flavor if set.
|
||||
Flavor string `json:"flavor"`
|
||||
// The name of the provider.
|
||||
Provider string `json:"provider"`
|
||||
Listeners []listeners.Listener `json:"listeners"`
|
||||
}
|
||||
|
||||
type StatusTree struct {
|
||||
Loadbalancer *LoadBalancer `json:"loadbalancer"`
|
||||
}
|
||||
|
||||
// LoadBalancerPage is the page returned by a pager when traversing over a
|
||||
// collection of load balancers.
|
||||
type LoadBalancerPage struct {
|
||||
pagination.LinkedPageBase
|
||||
}
|
||||
|
||||
// NextPageURL is invoked when a paginated collection of routers has reached
|
||||
// the end of a page and the pager seeks to traverse over a new one. In order
|
||||
// to do this, it needs to construct the next page's URL.
|
||||
func (r LoadBalancerPage) NextPageURL() (string, error) {
|
||||
var s struct {
|
||||
Links []gophercloud.Link `json:"loadbalancers_links"`
|
||||
}
|
||||
err := r.ExtractInto(&s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return gophercloud.ExtractNextURL(s.Links)
|
||||
}
|
||||
|
||||
// IsEmpty checks whether a LoadBalancerPage struct is empty.
|
||||
func (p LoadBalancerPage) IsEmpty() (bool, error) {
|
||||
is, err := ExtractLoadBalancers(p)
|
||||
return len(is) == 0, err
|
||||
}
|
||||
|
||||
// ExtractLoadBalancers accepts a Page struct, specifically a LoadbalancerPage struct,
|
||||
// and extracts the elements into a slice of LoadBalancer structs. In other words,
|
||||
// a generic collection is mapped into a relevant slice.
|
||||
func ExtractLoadBalancers(r pagination.Page) ([]LoadBalancer, error) {
|
||||
var s struct {
|
||||
LoadBalancers []LoadBalancer `json:"loadbalancers"`
|
||||
}
|
||||
err := (r.(LoadBalancerPage)).ExtractInto(&s)
|
||||
return s.LoadBalancers, err
|
||||
}
|
||||
|
||||
type commonResult struct {
|
||||
gophercloud.Result
|
||||
}
|
||||
|
||||
// Extract is a function that accepts a result and extracts a LoadBalancer.
|
||||
func (r commonResult) Extract() (*LoadBalancer, error) {
|
||||
var s struct {
|
||||
LoadBalancer *LoadBalancer `json:"loadbalancer"`
|
||||
}
|
||||
err := r.ExtractInto(&s)
|
||||
return s.LoadBalancer, err
|
||||
}
|
||||
|
||||
type GetStatusesResult struct {
|
||||
gophercloud.Result
|
||||
}
|
||||
|
||||
// Extract is a function that accepts a result and extracts a Loadbalancer.
|
||||
func (r GetStatusesResult) Extract() (*StatusTree, error) {
|
||||
var s struct {
|
||||
Statuses *StatusTree `json:"statuses"`
|
||||
}
|
||||
err := r.ExtractInto(&s)
|
||||
return s.Statuses, err
|
||||
}
|
||||
|
||||
// CreateResult represents the result of a create operation.
|
||||
type CreateResult struct {
|
||||
commonResult
|
||||
}
|
||||
|
||||
// GetResult represents the result of a get operation.
|
||||
type GetResult struct {
|
||||
commonResult
|
||||
}
|
||||
|
||||
// UpdateResult represents the result of an update operation.
|
||||
type UpdateResult struct {
|
||||
commonResult
|
||||
}
|
||||
|
||||
// DeleteResult represents the result of a delete operation.
|
||||
type DeleteResult struct {
|
||||
gophercloud.ErrResult
|
||||
}
|
21  vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go  (generated, vendored, new file)
@@ -0,0 +1,21 @@
|
||||
package loadbalancers
|
||||
|
||||
import "github.com/gophercloud/gophercloud"
|
||||
|
||||
const (
|
||||
rootPath = "lbaas"
|
||||
resourcePath = "loadbalancers"
|
||||
statusPath = "statuses"
|
||||
)
|
||||
|
||||
func rootURL(c *gophercloud.ServiceClient) string {
|
||||
return c.ServiceURL(rootPath, resourcePath)
|
||||
}
|
||||
|
||||
func resourceURL(c *gophercloud.ServiceClient, id string) string {
|
||||
return c.ServiceURL(rootPath, resourcePath, id)
|
||||
}
|
||||
|
||||
func statusRootURL(c *gophercloud.ServiceClient, id string) string {
|
||||
return c.ServiceURL(rootPath, resourcePath, id, statusPath)
|
||||
}
|
233  vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go  (generated, vendored, new file)
@@ -0,0 +1,233 @@
|
||||
package monitors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/pagination"
|
||||
)
|
||||
|
||||
// ListOptsBuilder allows extensions to add additional parameters to the
|
||||
// List request.
|
||||
type ListOptsBuilder interface {
|
||||
ToMonitorListQuery() (string, error)
|
||||
}
|
||||
|
||||
// ListOpts allows the filtering and sorting of paginated collections through
|
||||
// the API. Filtering is achieved by passing in struct field values that map to
|
||||
// the Monitor attributes you want to see returned. SortKey allows you to
|
||||
// sort by a particular Monitor attribute. SortDir sets the direction, and is
|
||||
// either `asc' or `desc'. Marker and Limit are used for pagination.
|
||||
type ListOpts struct {
|
||||
ID string `q:"id"`
|
||||
Name string `q:"name"`
|
||||
TenantID string `q:"tenant_id"`
|
||||
PoolID string `q:"pool_id"`
|
||||
Type string `q:"type"`
|
||||
Delay int `q:"delay"`
|
||||
Timeout int `q:"timeout"`
|
||||
MaxRetries int `q:"max_retries"`
|
||||
HTTPMethod string `q:"http_method"`
|
||||
URLPath string `q:"url_path"`
|
||||
ExpectedCodes string `q:"expected_codes"`
|
||||
AdminStateUp *bool `q:"admin_state_up"`
|
||||
Status string `q:"status"`
|
||||
Limit int `q:"limit"`
|
||||
Marker string `q:"marker"`
|
||||
SortKey string `q:"sort_key"`
|
||||
SortDir string `q:"sort_dir"`
|
||||
}
|
||||
|
||||
// ToMonitorListQuery formats a ListOpts into a query string.
func (opts ListOpts) ToMonitorListQuery() (string, error) {
    q, err := gophercloud.BuildQueryString(opts)
    if err != nil {
        return "", err
    }
    return q.String(), nil
}

// List returns a Pager which allows you to iterate over a collection of
// health monitors. It accepts a ListOpts struct, which allows you to filter and sort
// the returned collection for greater efficiency.
//
// Default policy settings return only those health monitors that are owned by the
// tenant who submits the request, unless an admin user submits the request.
func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
    url := rootURL(c)
    if opts != nil {
        query, err := opts.ToMonitorListQuery()
        if err != nil {
            return pagination.Pager{Err: err}
        }
        url += query
    }
    return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
        return MonitorPage{pagination.LinkedPageBase{PageResult: r}}
    })
}

// Constants that represent approved monitoring types.
const (
    TypePING  = "PING"
    TypeTCP   = "TCP"
    TypeHTTP  = "HTTP"
    TypeHTTPS = "HTTPS"
)

var (
    errDelayMustGETimeout = fmt.Errorf("Delay must be greater than or equal to timeout")
)

// CreateOptsBuilder is the interface options structs have to satisfy in order
// to be used in the main Create operation in this package. Since many
// extensions decorate or modify the common logic, it is useful for them to
// satisfy a basic interface in order for them to be used.
type CreateOptsBuilder interface {
    ToMonitorCreateMap() (map[string]interface{}, error)
}

// CreateOpts is the common options struct used in this package's Create
// operation.
type CreateOpts struct {
    // Required. The Pool to Monitor.
    PoolID string `json:"pool_id" required:"true"`
    // Required. The type of probe, which is PING, TCP, HTTP, or HTTPS, that is
    // sent by the load balancer to verify the member state.
    Type string `json:"type" required:"true"`
    // Required. The time, in seconds, between sending probes to members.
    Delay int `json:"delay" required:"true"`
    // Required. Maximum number of seconds for a Monitor to wait for a ping reply
    // before it times out. The value must be less than the delay value.
    Timeout int `json:"timeout" required:"true"`
    // Required. Number of permissible ping failures before changing the member's
    // status to INACTIVE. Must be a number between 1 and 10.
    MaxRetries int `json:"max_retries" required:"true"`
    // Required for HTTP(S) types. URI path that will be accessed if Monitor type
    // is HTTP or HTTPS.
    URLPath string `json:"url_path,omitempty"`
    // Required for HTTP(S) types. The HTTP method used for requests by the
    // Monitor. If this attribute is not specified, it defaults to "GET".
    HTTPMethod string `json:"http_method,omitempty"`
    // Required for HTTP(S) types. Expected HTTP codes for a passing HTTP(S)
    // Monitor. You can either specify a single status like "200", or a range
    // like "200-202".
    ExpectedCodes string `json:"expected_codes,omitempty"`
    // Indicates the owner of the Loadbalancer. Required for admins.
    TenantID string `json:"tenant_id,omitempty"`
    // Optional. The Name of the Monitor.
    Name string `json:"name,omitempty"`
    AdminStateUp *bool `json:"admin_state_up,omitempty"`
}

// ToMonitorCreateMap casts a CreateOpts struct to a map.
func (opts CreateOpts) ToMonitorCreateMap() (map[string]interface{}, error) {
    b, err := gophercloud.BuildRequestBody(opts, "healthmonitor")
    if err != nil {
        return nil, err
    }

    switch opts.Type {
    case TypeHTTP, TypeHTTPS:
        switch opts.URLPath {
        case "":
            return nil, fmt.Errorf("URLPath must be provided for HTTP and HTTPS")
        }
        switch opts.ExpectedCodes {
        case "":
            return nil, fmt.Errorf("ExpectedCodes must be provided for HTTP and HTTPS")
        }
    }

    return b, nil
}

/*
Create is an operation which provisions a new Health Monitor. There are
different types of Monitor you can provision: PING, TCP or HTTP(S). Below
are examples of how to create each one.

Here is an example config struct to use when creating a PING or TCP Monitor:

CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3}
CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3}

Here is an example config struct to use when creating an HTTP(S) Monitor:

CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3,
HTTPMethod: "HEAD", ExpectedCodes: "200", PoolID: "2c946bfc-1804-43ab-a2ff-58f6a762b505"}
*/
func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
    b, err := opts.ToMonitorCreateMap()
    if err != nil {
        r.Err = err
        return
    }
    _, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
    return
}

// Get retrieves a particular Health Monitor based on its unique ID.
func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
    _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
    return
}

// UpdateOptsBuilder is the interface options structs have to satisfy in order
// to be used in the main Update operation in this package. Since many
// extensions decorate or modify the common logic, it is useful for them to
// satisfy a basic interface in order for them to be used.
type UpdateOptsBuilder interface {
    ToMonitorUpdateMap() (map[string]interface{}, error)
}

// UpdateOpts is the common options struct used in this package's Update
// operation.
type UpdateOpts struct {
    // Required. The time, in seconds, between sending probes to members.
    Delay int `json:"delay,omitempty"`
    // Required. Maximum number of seconds for a Monitor to wait for a ping reply
    // before it times out. The value must be less than the delay value.
    Timeout int `json:"timeout,omitempty"`
    // Required. Number of permissible ping failures before changing the member's
    // status to INACTIVE. Must be a number between 1 and 10.
    MaxRetries int `json:"max_retries,omitempty"`
    // Required for HTTP(S) types. URI path that will be accessed if Monitor type
    // is HTTP or HTTPS.
    URLPath string `json:"url_path,omitempty"`
    // Required for HTTP(S) types. The HTTP method used for requests by the
    // Monitor. If this attribute is not specified, it defaults to "GET".
    HTTPMethod string `json:"http_method,omitempty"`
    // Required for HTTP(S) types. Expected HTTP codes for a passing HTTP(S)
    // Monitor. You can either specify a single status like "200", or a range
    // like "200-202".
    ExpectedCodes string `json:"expected_codes,omitempty"`
    // Optional. The Name of the Monitor.
    Name string `json:"name,omitempty"`
    AdminStateUp *bool `json:"admin_state_up,omitempty"`
}

// ToMonitorUpdateMap casts a UpdateOpts struct to a map.
func (opts UpdateOpts) ToMonitorUpdateMap() (map[string]interface{}, error) {
    return gophercloud.BuildRequestBody(opts, "healthmonitor")
}

// Update is an operation which modifies the attributes of the specified Monitor.
func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
    b, err := opts.ToMonitorUpdateMap()
    if err != nil {
        r.Err = err
        return
    }

    _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
        OkCodes: []int{200, 202},
    })
    return
}

// Delete will permanently delete a particular Monitor based on its unique ID.
func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
    _, r.Err = c.Delete(resourceURL(c, id), nil)
    return
}
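Usage note (a minimal sketch, not part of the vendored files above): a controller would call Create to attach a health monitor to a pool it manages. The sketch assumes an authenticated Neutron v2 *gophercloud.ServiceClient named networkClient and an existing pool ID; both names are placeholders.

// Minimal sketch: attach a TCP health monitor to an existing LBaaS v2 pool.
// networkClient and poolID are placeholders, not identifiers from this commit.
package example

import (
    "fmt"

    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
)

func ensureMonitor(networkClient *gophercloud.ServiceClient, poolID string) error {
    monitor, err := monitors.Create(networkClient, monitors.CreateOpts{
        PoolID:     poolID,
        Type:       monitors.TypeTCP,
        Delay:      20, // seconds between probes
        Timeout:    10, // must not exceed Delay
        MaxRetries: 3,
    }).Extract()
    if err != nil {
        return err
    }
    fmt.Printf("created health monitor %s for pool %s\n", monitor.ID, poolID)
    return nil
}

Extract converts the raw CreateResult into the Monitor struct defined in results.go below.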
144 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go generated vendored Normal file
@ -0,0 +1,144 @@
package monitors

import (
    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/pagination"
)

type PoolID struct {
    ID string `json:"id"`
}

// Monitor represents a load balancer health monitor. A health monitor is used
// to determine whether or not back-end members of the VIP's pool are usable
// for processing a request. A pool can have several health monitors associated
// with it. There are different types of health monitors supported:
//
// PING: used to ping the members using ICMP.
// TCP: used to connect to the members using TCP.
// HTTP: used to send an HTTP request to the member.
// HTTPS: used to send a secure HTTP request to the member.
//
// When a pool has several monitors associated with it, each member of the pool
// is monitored by all these monitors. If any monitor declares the member as
// unhealthy, then the member status is changed to INACTIVE and the member
// won't participate in its pool's load balancing. In other words, ALL monitors
// must declare the member to be healthy for it to stay ACTIVE.
type Monitor struct {
    // The unique ID for the Monitor.
    ID string `json:"id"`

    // The Name of the Monitor.
    Name string `json:"name"`

    // Only an administrative user can specify a tenant ID
    // other than its own.
    TenantID string `json:"tenant_id"`

    // The type of probe sent by the load balancer to verify the member state,
    // which is PING, TCP, HTTP, or HTTPS.
    Type string `json:"type"`

    // The time, in seconds, between sending probes to members.
    Delay int `json:"delay"`

    // The maximum number of seconds for a monitor to wait for a connection to be
    // established before it times out. This value must be less than the delay value.
    Timeout int `json:"timeout"`

    // Number of allowed connection failures before changing the status of the
    // member to INACTIVE. A valid value is from 1 to 10.
    MaxRetries int `json:"max_retries"`

    // The HTTP method that the monitor uses for requests.
    HTTPMethod string `json:"http_method"`

    // The HTTP path of the request sent by the monitor to test the health of a
    // member. Must be a string beginning with a forward slash (/).
    URLPath string `json:"url_path"`

    // Expected HTTP codes for a passing HTTP(S) monitor.
    ExpectedCodes string `json:"expected_codes"`

    // The administrative state of the health monitor, which is up (true) or down (false).
    AdminStateUp bool `json:"admin_state_up"`

    // The status of the health monitor. Indicates whether the health monitor is
    // operational.
    Status string `json:"status"`

    // List of pools that are associated with the health monitor.
    Pools []PoolID `json:"pools"`
}

// MonitorPage is the page returned by a pager when traversing over a
// collection of health monitors.
type MonitorPage struct {
    pagination.LinkedPageBase
}

// NextPageURL is invoked when a paginated collection of monitors has reached
// the end of a page and the pager seeks to traverse over a new one. In order
// to do this, it needs to construct the next page's URL.
func (r MonitorPage) NextPageURL() (string, error) {
    var s struct {
        Links []gophercloud.Link `json:"healthmonitors_links"`
    }

    err := r.ExtractInto(&s)
    if err != nil {
        return "", err
    }

    return gophercloud.ExtractNextURL(s.Links)
}

// IsEmpty checks whether a MonitorPage struct is empty.
func (r MonitorPage) IsEmpty() (bool, error) {
    is, err := ExtractMonitors(r)
    return len(is) == 0, err
}

// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct,
// and extracts the elements into a slice of Monitor structs. In other words,
// a generic collection is mapped into a relevant slice.
func ExtractMonitors(r pagination.Page) ([]Monitor, error) {
    var s struct {
        Monitors []Monitor `json:"healthmonitors"`
    }
    err := (r.(MonitorPage)).ExtractInto(&s)
    return s.Monitors, err
}

type commonResult struct {
    gophercloud.Result
}

// Extract is a function that accepts a result and extracts a monitor.
func (r commonResult) Extract() (*Monitor, error) {
    var s struct {
        Monitor *Monitor `json:"healthmonitor"`
    }
    err := r.ExtractInto(&s)
    return s.Monitor, err
}

// CreateResult represents the result of a create operation.
type CreateResult struct {
    commonResult
}

// GetResult represents the result of a get operation.
type GetResult struct {
    commonResult
}

// UpdateResult represents the result of an update operation.
type UpdateResult struct {
    commonResult
}

// DeleteResult represents the result of a delete operation.
type DeleteResult struct {
    gophercloud.ErrResult
}
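For completeness, a minimal paging sketch built on ExtractMonitors (not part of the vendored files); networkClient is again a placeholder for an authenticated Neutron v2 client.

// Minimal sketch: walk all health monitors visible to the current tenant.
package example

import (
    "fmt"

    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
    "github.com/gophercloud/gophercloud/pagination"
)

func listMonitors(networkClient *gophercloud.ServiceClient) error {
    return monitors.List(networkClient, monitors.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
        ms, err := monitors.ExtractMonitors(page)
        if err != nil {
            return false, err
        }
        for _, m := range ms {
            fmt.Printf("monitor %s type=%s delay=%ds pools=%d\n", m.ID, m.Type, m.Delay, len(m.Pools))
        }
        return true, nil // true means: continue to the next page
    })
}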
16 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go generated vendored Normal file
@ -0,0 +1,16 @@
package monitors

import "github.com/gophercloud/gophercloud"

const (
    rootPath     = "lbaas"
    resourcePath = "healthmonitors"
)

func rootURL(c *gophercloud.ServiceClient) string {
    return c.ServiceURL(rootPath, resourcePath)
}

func resourceURL(c *gophercloud.ServiceClient, id string) string {
    return c.ServiceURL(rootPath, resourcePath, id)
}
334 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go generated vendored Normal file
@ -0,0 +1,334 @@
package pools

import (
    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/pagination"
)

// ListOptsBuilder allows extensions to add additional parameters to the
// List request.
type ListOptsBuilder interface {
    ToPoolListQuery() (string, error)
}

// ListOpts allows the filtering and sorting of paginated collections through
// the API. Filtering is achieved by passing in struct field values that map to
// the Pool attributes you want to see returned. SortKey allows you to
// sort by a particular Pool attribute. SortDir sets the direction, and is
// either `asc' or `desc'. Marker and Limit are used for pagination.
type ListOpts struct {
    LBMethod       string `q:"lb_algorithm"`
    Protocol       string `q:"protocol"`
    TenantID       string `q:"tenant_id"`
    AdminStateUp   *bool  `q:"admin_state_up"`
    Name           string `q:"name"`
    ID             string `q:"id"`
    LoadbalancerID string `q:"loadbalancer_id"`
    ListenerID     string `q:"listener_id"`
    Limit          int    `q:"limit"`
    Marker         string `q:"marker"`
    SortKey        string `q:"sort_key"`
    SortDir        string `q:"sort_dir"`
}

// ToPoolListQuery formats a ListOpts into a query string.
func (opts ListOpts) ToPoolListQuery() (string, error) {
    q, err := gophercloud.BuildQueryString(opts)
    return q.String(), err
}

// List returns a Pager which allows you to iterate over a collection of
// pools. It accepts a ListOpts struct, which allows you to filter and sort
// the returned collection for greater efficiency.
//
// Default policy settings return only those pools that are owned by the
// tenant who submits the request, unless an admin user submits the request.
func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
    url := rootURL(c)
    if opts != nil {
        query, err := opts.ToPoolListQuery()
        if err != nil {
            return pagination.Pager{Err: err}
        }
        url += query
    }
    return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
        return PoolPage{pagination.LinkedPageBase{PageResult: r}}
    })
}

type LBMethod string
type Protocol string

// Supported attributes for create/update operations.
const (
    LBMethodRoundRobin       LBMethod = "ROUND_ROBIN"
    LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS"
    LBMethodSourceIp         LBMethod = "SOURCE_IP"

    ProtocolTCP   Protocol = "TCP"
    ProtocolHTTP  Protocol = "HTTP"
    ProtocolHTTPS Protocol = "HTTPS"
)

// CreateOptsBuilder is the interface options structs have to satisfy in order
// to be used in the main Create operation in this package. Since many
// extensions decorate or modify the common logic, it is useful for them to
// satisfy a basic interface in order for them to be used.
type CreateOptsBuilder interface {
    ToPoolCreateMap() (map[string]interface{}, error)
}

// CreateOpts is the common options struct used in this package's Create
// operation.
type CreateOpts struct {
    // The algorithm used to distribute load between the members of the pool. The
    // current specification supports LBMethodRoundRobin, LBMethodLeastConnections
    // and LBMethodSourceIp as valid values for this attribute.
    LBMethod LBMethod `json:"lb_algorithm" required:"true"`
    // The protocol used by the pool members. You can use either
    // ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS.
    Protocol Protocol `json:"protocol" required:"true"`
    // The Loadbalancer with which the members of the pool will be associated.
    // Note: one of LoadbalancerID or ListenerID must be provided.
    LoadbalancerID string `json:"loadbalancer_id,omitempty" xor:"ListenerID"`
    // The Listener with which the members of the pool will be associated.
    // Note: one of LoadbalancerID or ListenerID must be provided.
    ListenerID string `json:"listener_id,omitempty" xor:"LoadbalancerID"`
    // Only required if the caller has an admin role and wants to create a pool
    // for another tenant.
    TenantID string `json:"tenant_id,omitempty"`
    // Name of the pool.
    Name string `json:"name,omitempty"`
    // Human-readable description for the pool.
    Description string `json:"description,omitempty"`
    // Omit this field to prevent session persistence.
    Persistence *SessionPersistence `json:"session_persistence,omitempty"`
    // The administrative state of the Pool. A valid value is true (UP)
    // or false (DOWN).
    AdminStateUp *bool `json:"admin_state_up,omitempty"`
}

// ToPoolCreateMap casts a CreateOpts struct to a map.
func (opts CreateOpts) ToPoolCreateMap() (map[string]interface{}, error) {
    return gophercloud.BuildRequestBody(opts, "pool")
}

// Create accepts a CreateOpts struct and uses the values to create a new
// load balancer pool.
func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
    b, err := opts.ToPoolCreateMap()
    if err != nil {
        r.Err = err
        return
    }
    _, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
    return
}

// Get retrieves a particular pool based on its unique ID.
func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
    _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
    return
}

// UpdateOptsBuilder is the interface options structs have to satisfy in order
// to be used in the main Update operation in this package. Since many
// extensions decorate or modify the common logic, it is useful for them to
// satisfy a basic interface in order for them to be used.
type UpdateOptsBuilder interface {
    ToPoolUpdateMap() (map[string]interface{}, error)
}

// UpdateOpts is the common options struct used in this package's Update
// operation.
type UpdateOpts struct {
    // Name of the pool.
    Name string `json:"name,omitempty"`
    // Human-readable description for the pool.
    Description string `json:"description,omitempty"`
    // The algorithm used to distribute load between the members of the pool. The
    // current specification supports LBMethodRoundRobin, LBMethodLeastConnections
    // and LBMethodSourceIp as valid values for this attribute.
    LBMethod LBMethod `json:"lb_algorithm,omitempty"`
    // The administrative state of the Pool. A valid value is true (UP)
    // or false (DOWN).
    AdminStateUp *bool `json:"admin_state_up,omitempty"`
}

// ToPoolUpdateMap casts a UpdateOpts struct to a map.
func (opts UpdateOpts) ToPoolUpdateMap() (map[string]interface{}, error) {
    return gophercloud.BuildRequestBody(opts, "pool")
}

// Update allows pools to be updated.
func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
    b, err := opts.ToPoolUpdateMap()
    if err != nil {
        r.Err = err
        return
    }
    _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
        OkCodes: []int{200},
    })
    return
}

// Delete will permanently delete a particular pool based on its unique ID.
func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
    _, r.Err = c.Delete(resourceURL(c, id), nil)
    return
}

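A minimal sketch of the pool-creation path (not part of the vendored files), assuming a placeholder networkClient and an existing listener ID; per the xor tags above, exactly one of ListenerID or LoadbalancerID may be set.

// Minimal sketch: create a round-robin HTTP pool attached to an existing listener.
package example

import (
    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
)

func createPool(networkClient *gophercloud.ServiceClient, listenerID string) (*pools.Pool, error) {
    return pools.Create(networkClient, pools.CreateOpts{
        Name:       "web-pool", // placeholder name
        Protocol:   pools.ProtocolHTTP,
        LBMethod:   pools.LBMethodRoundRobin,
        ListenerID: listenerID, // exactly one of ListenerID or LoadbalancerID
    }).Extract()
}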
// ListMembersOptsBuilder allows extensions to add additional parameters to the
// ListMembers request.
type ListMembersOptsBuilder interface {
    ToMembersListQuery() (string, error)
}

// ListMembersOpts allows the filtering and sorting of paginated collections through
// the API. Filtering is achieved by passing in struct field values that map to
// the Member attributes you want to see returned. SortKey allows you to
// sort by a particular Member attribute. SortDir sets the direction, and is
// either `asc' or `desc'. Marker and Limit are used for pagination.
type ListMembersOpts struct {
    Name         string `q:"name"`
    Weight       int    `q:"weight"`
    AdminStateUp *bool  `q:"admin_state_up"`
    TenantID     string `q:"tenant_id"`
    Address      string `q:"address"`
    ProtocolPort int    `q:"protocol_port"`
    ID           string `q:"id"`
    Limit        int    `q:"limit"`
    Marker       string `q:"marker"`
    SortKey      string `q:"sort_key"`
    SortDir      string `q:"sort_dir"`
}

// ToMembersListQuery formats a ListMembersOpts into a query string.
func (opts ListMembersOpts) ToMembersListQuery() (string, error) {
    q, err := gophercloud.BuildQueryString(opts)
    return q.String(), err
}

// ListMembers returns a Pager which allows you to iterate over a collection of
// members. It accepts a ListMembersOptsBuilder, which allows you to filter and sort
// the returned collection for greater efficiency.
//
// Default policy settings return only those members that are owned by the
// tenant who submits the request, unless an admin user submits the request.
func ListMembers(c *gophercloud.ServiceClient, poolID string, opts ListMembersOptsBuilder) pagination.Pager {
    url := memberRootURL(c, poolID)
    if opts != nil {
        query, err := opts.ToMembersListQuery()
        if err != nil {
            return pagination.Pager{Err: err}
        }
        url += query
    }
    return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
        return MemberPage{pagination.LinkedPageBase{PageResult: r}}
    })
}

// CreateMemberOptsBuilder is the interface options structs have to satisfy in order
// to be used in the CreateMember operation in this package. Since many
// extensions decorate or modify the common logic, it is useful for them to
// satisfy a basic interface in order for them to be used.
type CreateMemberOptsBuilder interface {
    ToMemberCreateMap() (map[string]interface{}, error)
}

// CreateMemberOpts is the common options struct used in this package's CreateMember
// operation.
type CreateMemberOpts struct {
    // Required. The IP address of the member to receive traffic from the load balancer.
    Address string `json:"address" required:"true"`
    // Required. The port on which to listen for client traffic.
    ProtocolPort int `json:"protocol_port" required:"true"`
    // Optional. Name of the Member.
    Name string `json:"name,omitempty"`
    // Only required if the caller has an admin role and wants to create a Member
    // for another tenant.
    TenantID string `json:"tenant_id,omitempty"`
    // Optional. A positive integer value that indicates the relative portion of
    // traffic that this member should receive from the pool. For example, a
    // member with a weight of 10 receives five times as much traffic as a member
    // with a weight of 2.
    Weight int `json:"weight,omitempty"`
    // Optional. If you omit this parameter, LBaaS uses the vip_subnet_id
    // parameter value for the subnet UUID.
    SubnetID string `json:"subnet_id,omitempty"`
    // Optional. The administrative state of the Pool. A valid value is true (UP)
    // or false (DOWN).
    AdminStateUp *bool `json:"admin_state_up,omitempty"`
}

// ToMemberCreateMap casts a CreateMemberOpts struct to a map.
func (opts CreateMemberOpts) ToMemberCreateMap() (map[string]interface{}, error) {
    return gophercloud.BuildRequestBody(opts, "member")
}

// CreateMember will create and associate a Member with a particular Pool.
func CreateMember(c *gophercloud.ServiceClient, poolID string, opts CreateMemberOpts) (r CreateMemberResult) {
    b, err := opts.ToMemberCreateMap()
    if err != nil {
        r.Err = err
        return
    }
    _, r.Err = c.Post(memberRootURL(c, poolID), b, &r.Body, nil)
    return
}

// GetMember retrieves a particular Pool Member based on its unique ID.
func GetMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r GetMemberResult) {
    _, r.Err = c.Get(memberResourceURL(c, poolID, memberID), &r.Body, nil)
    return
}

// UpdateMemberOptsBuilder is the interface options structs have to satisfy in order
// to be used in the main Update operation in this package. Since many
// extensions decorate or modify the common logic, it is useful for them to
// satisfy a basic interface in order for them to be used.
type UpdateMemberOptsBuilder interface {
    ToMemberUpdateMap() (map[string]interface{}, error)
}

// UpdateMemberOpts is the common options struct used in this package's Update
// operation.
type UpdateMemberOpts struct {
    // Optional. Name of the Member.
    Name string `json:"name,omitempty"`
    // Optional. A positive integer value that indicates the relative portion of
    // traffic that this member should receive from the pool. For example, a
    // member with a weight of 10 receives five times as much traffic as a member
    // with a weight of 2.
    Weight int `json:"weight,omitempty"`
    // Optional. The administrative state of the Pool. A valid value is true (UP)
    // or false (DOWN).
    AdminStateUp *bool `json:"admin_state_up,omitempty"`
}

// ToMemberUpdateMap casts an UpdateMemberOpts struct to a map.
func (opts UpdateMemberOpts) ToMemberUpdateMap() (map[string]interface{}, error) {
    return gophercloud.BuildRequestBody(opts, "member")
}

// UpdateMember allows a Member to be updated.
func UpdateMember(c *gophercloud.ServiceClient, poolID string, memberID string, opts UpdateMemberOptsBuilder) (r UpdateMemberResult) {
    b, err := opts.ToMemberUpdateMap()
    if err != nil {
        r.Err = err
        return
    }
    _, r.Err = c.Put(memberResourceURL(c, poolID, memberID), b, &r.Body, &gophercloud.RequestOpts{
        OkCodes: []int{200, 201, 202},
    })
    return
}

// DeleteMember will remove and disassociate a Member from a particular Pool.
func DeleteMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r DeleteMemberResult) {
    _, r.Err = c.Delete(memberResourceURL(c, poolID, memberID), nil)
    return
}
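A minimal sketch of registering a backend endpoint as a pool member (not part of the vendored files); networkClient, poolID, subnetID, address, and port are placeholders.

// Minimal sketch: add one backend address to an existing pool.
package example

import (
    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
)

func addMember(networkClient *gophercloud.ServiceClient, poolID, subnetID, address string, port int) (*pools.Member, error) {
    return pools.CreateMember(networkClient, poolID, pools.CreateMemberOpts{
        Address:      address, // e.g. the IP of a backend endpoint
        ProtocolPort: port,
        SubnetID:     subnetID,
        Weight:       1,
    }).Extract()
}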
242 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go generated vendored Normal file
@ -0,0 +1,242 @@
package pools

import (
    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
    "github.com/gophercloud/gophercloud/pagination"
)

// SessionPersistence represents the session persistence feature of the load
// balancing service. It attempts to force connections or requests in the same
// session to be processed by the same member as long as it is active. Three
// types of persistence are supported:
//
// SOURCE_IP: With this mode, all connections originating from the same source
// IP address will be handled by the same Member of the Pool.
// HTTP_COOKIE: With this persistence mode, the load balancing function will
// create a cookie on the first request from a client. Subsequent
// requests containing the same cookie value will be handled by
// the same Member of the Pool.
// APP_COOKIE: With this persistence mode, the load balancing function will
// rely on a cookie established by the backend application. All
// requests carrying the same cookie value will be handled by the
// same Member of the Pool.
type SessionPersistence struct {
    // The type of persistence mode
    Type string `json:"type"`

    // Name of cookie if persistence mode is set appropriately
    CookieName string `json:"cookie_name,omitempty"`
}

type LoadBalancerID struct {
    ID string `json:"id"`
}

type ListenerID struct {
    ID string `json:"id"`
}

// Pool represents a logical set of devices, such as web servers, that you
// group together to receive and process traffic. The load balancing function
// chooses a Member of the Pool according to the configured load balancing
// method to handle the new requests or connections received on the VIP address.
type Pool struct {
    // The load-balancer algorithm, which is round-robin, least-connections, and
    // so on. This value, which must be supported, is dependent on the provider.
    // Round-robin must be supported.
    LBMethod string `json:"lb_algorithm"`
    // The protocol of the Pool, which is TCP, HTTP, or HTTPS.
    Protocol string `json:"protocol"`
    // Description for the Pool.
    Description string `json:"description"`
    // A list of listener object IDs.
    Listeners []ListenerID `json:"listeners"`
    // A list of member objects.
    Members []Member `json:"members"`
    // The ID of the associated health monitor.
    MonitorID string `json:"healthmonitor_id"`
    // The network on which the members of the Pool will be located. Only members
    // that are on this network can be added to the Pool.
    SubnetID string `json:"subnet_id"`
    // Owner of the Pool. Only an administrative user can specify a tenant ID
    // other than its own.
    TenantID string `json:"tenant_id"`
    // The administrative state of the Pool, which is up (true) or down (false).
    AdminStateUp bool `json:"admin_state_up"`
    // Pool name. Does not have to be unique.
    Name string `json:"name"`
    // The unique ID for the Pool.
    ID string `json:"id"`
    // A list of load balancer object IDs.
    Loadbalancers []LoadBalancerID `json:"loadbalancers"`
    // Indicates whether connections in the same session will be processed by the
    // same Pool member or not.
    Persistence SessionPersistence `json:"session_persistence"`
    // The provider
    Provider string `json:"provider"`
    Monitor monitors.Monitor `json:"healthmonitor"`
}

// PoolPage is the page returned by a pager when traversing over a
// collection of pools.
type PoolPage struct {
    pagination.LinkedPageBase
}

// NextPageURL is invoked when a paginated collection of pools has reached
// the end of a page and the pager seeks to traverse over a new one. In order
// to do this, it needs to construct the next page's URL.
func (r PoolPage) NextPageURL() (string, error) {
    var s struct {
        Links []gophercloud.Link `json:"pools_links"`
    }
    err := r.ExtractInto(&s)
    if err != nil {
        return "", err
    }
    return gophercloud.ExtractNextURL(s.Links)
}

// IsEmpty checks whether a PoolPage struct is empty.
func (r PoolPage) IsEmpty() (bool, error) {
    is, err := ExtractPools(r)
    return len(is) == 0, err
}

// ExtractPools accepts a Page struct, specifically a PoolPage struct,
// and extracts the elements into a slice of Pool structs. In other words,
// a generic collection is mapped into a relevant slice.
func ExtractPools(r pagination.Page) ([]Pool, error) {
    var s struct {
        Pools []Pool `json:"pools"`
    }
    err := (r.(PoolPage)).ExtractInto(&s)
    return s.Pools, err
}

type commonResult struct {
    gophercloud.Result
}

// Extract is a function that accepts a result and extracts a pool.
func (r commonResult) Extract() (*Pool, error) {
    var s struct {
        Pool *Pool `json:"pool"`
    }
    err := r.ExtractInto(&s)
    return s.Pool, err
}

// CreateResult represents the result of a Create operation.
type CreateResult struct {
    commonResult
}

// GetResult represents the result of a Get operation.
type GetResult struct {
    commonResult
}

// UpdateResult represents the result of an Update operation.
type UpdateResult struct {
    commonResult
}

// DeleteResult represents the result of a Delete operation.
type DeleteResult struct {
    gophercloud.ErrResult
}

// Member represents the application running on a backend server.
type Member struct {
    // Name of the Member.
    Name string `json:"name"`
    // Weight of Member.
    Weight int `json:"weight"`
    // The administrative state of the member, which is up (true) or down (false).
    AdminStateUp bool `json:"admin_state_up"`
    // Owner of the Member. Only an administrative user can specify a tenant ID
    // other than its own.
    TenantID string `json:"tenant_id"`
    // The subnet UUID for the Member.
    SubnetID string `json:"subnet_id"`
    // The Pool to which the Member belongs.
    PoolID string `json:"pool_id"`
    // The IP address of the Member.
    Address string `json:"address"`
    // The port on which the application is hosted.
    ProtocolPort int `json:"protocol_port"`
    // The unique ID for the Member.
    ID string `json:"id"`
}

// MemberPage is the page returned by a pager when traversing over a
// collection of Members in a Pool.
type MemberPage struct {
    pagination.LinkedPageBase
}

// NextPageURL is invoked when a paginated collection of members has reached
// the end of a page and the pager seeks to traverse over a new one. In order
// to do this, it needs to construct the next page's URL.
func (r MemberPage) NextPageURL() (string, error) {
    var s struct {
        Links []gophercloud.Link `json:"members_links"`
    }
    err := r.ExtractInto(&s)
    if err != nil {
        return "", err
    }
    return gophercloud.ExtractNextURL(s.Links)
}

// IsEmpty checks whether a MemberPage struct is empty.
func (r MemberPage) IsEmpty() (bool, error) {
    is, err := ExtractMembers(r)
    return len(is) == 0, err
}

// ExtractMembers accepts a Page struct, specifically a MemberPage struct,
// and extracts the elements into a slice of Member structs. In other words,
// a generic collection is mapped into a relevant slice.
func ExtractMembers(r pagination.Page) ([]Member, error) {
    var s struct {
        Members []Member `json:"members"`
    }
    err := (r.(MemberPage)).ExtractInto(&s)
    return s.Members, err
}

type commonMemberResult struct {
    gophercloud.Result
}

// Extract is a function that accepts a result and extracts a member.
func (r commonMemberResult) Extract() (*Member, error) {
    var s struct {
        Member *Member `json:"member"`
    }
    err := r.ExtractInto(&s)
    return s.Member, err
}

// CreateMemberResult represents the result of a CreateMember operation.
type CreateMemberResult struct {
    commonMemberResult
}

// GetMemberResult represents the result of a GetMember operation.
type GetMemberResult struct {
    commonMemberResult
}

// UpdateMemberResult represents the result of an UpdateMember operation.
type UpdateMemberResult struct {
    commonMemberResult
}

// DeleteMemberResult represents the result of a DeleteMember operation.
type DeleteMemberResult struct {
    gophercloud.ErrResult
}
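A minimal read-side sketch (not part of the vendored files), assuming a placeholder networkClient and poolID; whether Members and the embedded health monitor are actually populated depends on the Neutron LBaaS v2 response.

// Minimal sketch: fetch a pool and print the members modeled by the Pool struct.
package example

import (
    "fmt"

    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
)

func describePool(networkClient *gophercloud.ServiceClient, poolID string) error {
    pool, err := pools.Get(networkClient, poolID).Extract()
    if err != nil {
        return err
    }
    fmt.Printf("pool %s (%s/%s), monitor=%s\n", pool.Name, pool.Protocol, pool.LBMethod, pool.MonitorID)
    for _, m := range pool.Members {
        fmt.Printf("  member %s %s:%d weight=%d\n", m.ID, m.Address, m.ProtocolPort, m.Weight)
    }
    return nil
}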
25 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go generated vendored Normal file
@ -0,0 +1,25 @@
package pools

import "github.com/gophercloud/gophercloud"

const (
    rootPath     = "lbaas"
    resourcePath = "pools"
    memberPath   = "members"
)

func rootURL(c *gophercloud.ServiceClient) string {
    return c.ServiceURL(rootPath, resourcePath)
}

func resourceURL(c *gophercloud.ServiceClient, id string) string {
    return c.ServiceURL(rootPath, resourcePath, id)
}

func memberRootURL(c *gophercloud.ServiceClient, poolId string) string {
    return c.ServiceURL(rootPath, resourcePath, poolId, memberPath)
}

func memberResourceURL(c *gophercloud.ServiceClient, poolID string, memberID string) string {
    return c.ServiceURL(rootPath, resourcePath, poolID, memberPath, memberID)
}
30 vendor/vendor.json vendored
@ -239,12 +239,42 @@
"revision": "1217c40cf502cef2fc3aa1a25fcd1ed1bdf00a4b",
|
||||
"revisionTime": "2017-07-18T07:00:34Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "CHmnyRSFPivC+b/ojgfeEIY5ReM=",
|
||||
"path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips",
|
||||
"revision": "1217c40cf502cef2fc3aa1a25fcd1ed1bdf00a4b",
|
||||
"revisionTime": "2017-07-18T07:00:34Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "Mjt7GwFygyqPxygY8xZZnUasHmk=",
|
||||
"path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers",
|
||||
"revision": "1217c40cf502cef2fc3aa1a25fcd1ed1bdf00a4b",
|
||||
"revisionTime": "2017-07-18T07:00:34Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "mhpwj5tPv7Uw5aUfC55fhLPBcKo=",
|
||||
"path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners",
|
||||
"revision": "1217c40cf502cef2fc3aa1a25fcd1ed1bdf00a4b",
|
||||
"revisionTime": "2017-07-18T07:00:34Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "5efJz6UH7JCFeav5ZCCzicXCFTU=",
|
||||
"path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers",
|
||||
"revision": "1217c40cf502cef2fc3aa1a25fcd1ed1bdf00a4b",
|
||||
"revisionTime": "2017-07-18T07:00:34Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "TVFgBTz7B6bb1R4TWdgAkbE1/fk=",
|
||||
"path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors",
|
||||
"revision": "1217c40cf502cef2fc3aa1a25fcd1ed1bdf00a4b",
|
||||
"revisionTime": "2017-07-18T07:00:34Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "xirjw9vJIN6rmkT3T56bfPfOLUM=",
|
||||
"path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools",
|
||||
"revision": "1217c40cf502cef2fc3aa1a25fcd1ed1bdf00a4b",
|
||||
"revisionTime": "2017-07-18T07:00:34Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "S+9nmGWO/5zr/leEGq4qLzU0gsQ=",
|
||||
"path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding",
|
||||
|