Implement GetLoadBalancerName per provider and add DefaultLoadBalancerName.

morrislaw
2018-08-04 00:36:48 -04:00
parent 0ffee495ad
commit 6ecec23690
20 changed files with 138 additions and 101 deletions
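All of the hunks shown below touch the Azure provider's load balancer reconciliation code. The commit wires the new per-provider GetLoadBalancerName hook into it and backs the hook with a shared DefaultLoadBalancerName helper in pkg/cloudprovider. That helper's body is not part of this diff; the following is only a sketch of what it plausibly contains, assuming it keeps the long-standing scheme of deriving the name from the Service UID:

```go
package cloudprovider

import (
	"strings"

	"k8s.io/api/core/v1"
)

// DefaultLoadBalancerName derives a cluster-unique load balancer name for a
// Service: "a" plus the Service UID with dashes stripped, truncated to 32
// characters to fit common cloud resource-name length limits. This body is
// an assumption based on the historical naming behavior, not part of the diff.
func DefaultLoadBalancerName(service *v1.Service) string {
	ret := "a" + string(service.UID)
	ret = strings.Replace(ret, "-", "", -1)
	if len(ret) > 32 {
		ret = ret[:32]
	}
	return ret
}
```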


@@ -27,6 +27,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/cloudprovider"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
@@ -186,6 +187,11 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
return nil
}
+// GetLoadBalancerName returns the LoadBalancer name.
+func (az *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
+return cloudprovider.DefaultLoadBalancerName(service)
+}
// getServiceLoadBalancer gets the loadbalancer for the service if it already exists.
// If wantLb is TRUE then it selects a new load balancer.
// In case the selected load balancer does not exist it returns network.LoadBalancer struct
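The first rename in the hunks that follow is what makes room for the new method: the Azure-internal helper getLoadBalancerName becomes getAzureLoadBalancerName, so it can no longer be confused with the exported GetLoadBalancerName above. The remaining hunks promote package-level helpers (getFrontendIPConfigName, getPublicIPName, getLoadBalancerRuleName, getSecurityRuleName, serviceOwnsFrontendIP, serviceOwnsRule) to methods on *Cloud, presumably because the names they compute now flow through the provider-level hook. The renamed helper's body is not shown in this diff; a hypothetical reconstruction, assuming the primary VM set shares the cluster-named load balancer and internal load balancers get a dedicated suffix:

```go
// getAzureLoadBalancerName is a sketch, not the committed body: the primary
// VM set uses the bare cluster name, other VM sets use their own name, and
// internal load balancers get an "-internal" suffix so they can coexist
// with the public load balancer of the same VM set.
func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
	lbNamePrefix := vmSetName
	if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) {
		lbNamePrefix = clusterName
	}
	if isInternal {
		return lbNamePrefix + "-internal"
	}
	return lbNamePrefix
}
```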
@@ -195,7 +201,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
isInternal := requiresInternalLoadBalancer(service)
var defaultLB *network.LoadBalancer
primaryVMSetName := az.vmSet.GetPrimaryVMSetName()
-defaultLBName := az.getLoadBalancerName(clusterName, primaryVMSetName, isInternal)
+defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal)
existingLBs, err := az.ListLBWithRetry()
if err != nil {
@@ -280,7 +286,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
}
selectedLBRuleCount := math.MaxInt32
for _, currASName := range *vmSetNames {
-currLBName := az.getLoadBalancerName(clusterName, currASName, isInternal)
+currLBName := az.getAzureLoadBalancerName(clusterName, currASName, isInternal)
lb, exists := mapExistingLBs[currLBName]
if !exists {
// select this LB as this is a new LB and will have minimum rules
@@ -330,7 +336,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
return nil, nil
}
isInternal := requiresInternalLoadBalancer(service)
-lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service))
+lbFrontendIPConfigName := az.getFrontendIPConfigName(service, subnet(service))
serviceName := getServiceName(service)
for _, ipConfiguration := range *lb.FrontendIPConfigurations {
if lbFrontendIPConfigName == *ipConfiguration.Name {
@@ -369,7 +375,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service) (string, error) {
loadBalancerIP := service.Spec.LoadBalancerIP
if len(loadBalancerIP) == 0 {
-return getPublicIPName(clusterName, service), nil
+return az.getPublicIPName(clusterName, service), nil
}
pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
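The determinePublicIPName hunk above shows the motivation for these method conversions most directly: when the Service does not pin a LoadBalancerIP, the generated public IP name embeds the load balancer name, which must now come from the per-provider hook rather than the old package-level function. A hedged sketch of the converted helper (its body is not in this diff):

```go
// getPublicIPName is a hypothetical reconstruction: the public IP name joins
// the cluster name with the service's load balancer name, routed through the
// provider hook so overrides of GetLoadBalancerName take effect here too.
func (az *Cloud) getPublicIPName(clusterName string, service *v1.Service) string {
	return fmt.Sprintf("%s-%s", clusterName, az.GetLoadBalancerName(context.TODO(), clusterName, service))
}
```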
@@ -511,7 +517,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
}
lbName := *lb.Name
glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
-lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service))
+lbFrontendIPConfigName := az.getFrontendIPConfigName(service, subnet(service))
lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
lbBackendPoolName := getBackendPoolName(clusterName)
lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendPoolName)
@@ -561,7 +567,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
if !wantLb {
for i := len(newConfigs) - 1; i >= 0; i-- {
config := newConfigs[i]
-if serviceOwnsFrontendIP(config, service) {
+if az.serviceOwnsFrontendIP(config, service) {
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
dirtyConfigs = true
@@ -571,7 +577,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
if isInternal {
for i := len(newConfigs) - 1; i >= 0; i-- {
config := newConfigs[i]
-if serviceOwnsFrontendIP(config, service) && !strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
+if az.serviceOwnsFrontendIP(config, service) && !strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
dirtyConfigs = true
@@ -656,7 +662,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
var expectedProbes []network.Probe
var expectedRules []network.LoadBalancingRule
for _, port := range ports {
-lbRuleName := getLoadBalancerRuleName(service, port, subnet(service))
+lbRuleName := az.getLoadBalancerRuleName(service, port, subnet(service))
transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
if err != nil {
@@ -739,7 +745,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
}
for i := len(updatedProbes) - 1; i >= 0; i-- {
existingProbe := updatedProbes[i]
-if serviceOwnsRule(service, *existingProbe.Name) {
+if az.serviceOwnsRule(service, *existingProbe.Name) {
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
keepProbe := false
if findProbe(expectedProbes, existingProbe) {
@@ -780,7 +786,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
// update rules: remove unwanted
for i := len(updatedRules) - 1; i >= 0; i-- {
existingRule := updatedRules[i]
-if serviceOwnsRule(service, *existingRule.Name) {
+if az.serviceOwnsRule(service, *existingRule.Name) {
keepRule := false
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
if findRule(expectedRules, existingRule) {
@@ -939,7 +945,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
}
for j := range sourceAddressPrefixes {
ix := i*len(sourceAddressPrefixes) + j
-securityRuleName := getSecurityRuleName(service, port, sourceAddressPrefixes[j])
+securityRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefixes[j])
expectedSecurityRules[ix] = network.SecurityRule{
Name: to.StringPtr(securityRuleName),
SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
@@ -975,7 +981,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
// to this service
for i := len(updatedRules) - 1; i >= 0; i-- {
existingRule := updatedRules[i]
-if serviceOwnsRule(service, *existingRule.Name) {
+if az.serviceOwnsRule(service, *existingRule.Name) {
glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
keepRule := false
if findSecurityRule(expectedSecurityRules, existingRule) {
@@ -994,7 +1000,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
if useSharedSecurityRule(service) && !wantLb {
for _, port := range ports {
for _, sourceAddressPrefix := range sourceAddressPrefixes {
-sharedRuleName := getSecurityRuleName(service, port, sourceAddressPrefix)
+sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix)
sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName)
if !sharedRuleFound {
glog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)