Move from glog to klog
- Move from the old github.com/golang/glog to k8s.io/klog
- klog has an explicit InitFlags(), so we add the flag registration where necessary
- we update the other repositories that we vendor that made a similar change from glog to klog:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by calling InitFlags explicitly in their init() methods

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
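For context, the practical difference behind the InitFlags() bullets above: glog registers its logging flags (-v, -logtostderr, ...) on the global flag set as an import side effect, while klog leaves registration to the caller. A minimal sketch of the pattern, for a standalone binary (illustrative only, not part of this commit):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Unlike glog, klog does not register -v and friends at import time;
	// passing nil registers them on the default flag.CommandLine set.
	klog.InitFlags(nil)
	flag.Parse()

	klog.V(2).Infof("only emitted when run with -v=2 or higher")
	klog.Flush() // klog buffers log writes; flush before exit
}

Tests that depend on these flags need the same call, which is what the last bullet refers to: a hypothetical func init() { klog.InitFlags(nil) } in the affected test files.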
@@ -31,7 +31,7 @@ import (
 	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
 	"github.com/Azure/go-autorest/autorest/to"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )

 const (
@@ -101,7 +101,7 @@ func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, servic
 	}
 	if !exists {
 		serviceName := getServiceName(service)
-		glog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName)
+		klog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName)
		return nil, false, nil
 	}
 	return status, true, nil
@@ -120,7 +120,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser
 	// the service may be switched from an internal LB to a public one, or vise versa.
 	// Here we'll firstly ensure service do not lie in the opposite LB.
 	serviceName := getServiceName(service)
-	glog.V(5).Infof("ensureloadbalancer(%s): START clusterName=%q", serviceName, clusterName)
+	klog.V(5).Infof("ensureloadbalancer(%s): START clusterName=%q", serviceName, clusterName)

 	lb, err := az.reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */)
 	if err != nil {
@@ -136,7 +136,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser
 	if lbStatus != nil && len(lbStatus.Ingress) > 0 {
 		serviceIP = &lbStatus.Ingress[0].IP
 	}
-	glog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP))
+	klog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP))
 	if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil {
 		return nil, err
 	}
@@ -169,14 +169,14 @@ func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, ser
 func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
 	isInternal := requiresInternalLoadBalancer(service)
 	serviceName := getServiceName(service)
-	glog.V(5).Infof("delete(%s): START clusterName=%q", serviceName, clusterName)
+	klog.V(5).Infof("delete(%s): START clusterName=%q", serviceName, clusterName)

 	serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal)
 	if err != nil {
 		return err
 	}

-	glog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup)
+	klog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup)
 	if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil {
 		return err
 	}
@@ -189,7 +189,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
 		return err
 	}

-	glog.V(2).Infof("delete(%s): FINISH", serviceName)
+	klog.V(2).Infof("delete(%s): FINISH", serviceName)
 	return nil
 }

@@ -279,13 +279,13 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
 func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
 	isInternal := requiresInternalLoadBalancer(service)
 	serviceName := getServiceName(service)
-	glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
+	klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
 	vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
 	if err != nil {
-		glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
+		klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
 		return nil, false, err
 	}
-	glog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames)
+	klog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames)

 	mapExistingLBs := map[string]network.LoadBalancer{}
 	for _, lb := range *existingLBs {
@@ -320,13 +320,13 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi

 	if selectedLB == nil {
 		err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected VM sets %v", clusterName, serviceName, isInternal, *vmSetNames)
-		glog.Error(err)
+		klog.Error(err)
 		return nil, false, err
 	}
 	// validate if the selected LB has not exceeded the MaximumLoadBalancerRuleCount
 	if az.Config.MaximumLoadBalancerRuleCount != 0 && selectedLBRuleCount >= az.Config.MaximumLoadBalancerRuleCount {
 		err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, vmSetNames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *vmSetNames)
-		glog.Error(err)
+		klog.Error(err)
 		return selectedLB, existsLb, err
 	}

@@ -335,11 +335,11 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi

 func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) {
 	if lb == nil {
-		glog.V(10).Info("getServiceLoadBalancerStatus: lb is nil")
+		klog.V(10).Info("getServiceLoadBalancerStatus: lb is nil")
 		return nil, nil
 	}
 	if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil {
-		glog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil")
+		klog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil")
 		return nil, nil
 	}
 	isInternal := requiresInternalLoadBalancer(service)
@@ -371,7 +371,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
 				}
 			}

-			glog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", to.String(lbIP), lbFrontendIPConfigName, serviceName)
+			klog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", to.String(lbIP), lbFrontendIPConfigName, serviceName)
 			return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: to.String(lbIP)}}}, nil
 		}
 	}
@@ -434,11 +434,11 @@ func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, s
 		return "", err
 	}
 	if !existsLb {
-		glog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name)
+		klog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name)
 		return "", nil
 	}
 	if len(lbStatus.Ingress) < 1 {
-		glog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. Assuming it has been removed", service.Name)
+		klog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. Assuming it has been removed", service.Name)
 		return "", nil
 	}

@@ -473,14 +473,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
 		}
 	}

-	glog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
-	glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name)
+	klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
+	klog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name)
 	err = az.CreateOrUpdatePIPWithRetry(service, pipResourceGroup, pip)
 	if err != nil {
-		glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name)
+		klog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name)
 		return nil, err
 	}
-	glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name)
+	klog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name)

 	ctx, cancel := getContextWithCancel()
 	defer cancel()
@@ -570,14 +570,14 @@ func (az *Cloud) isFrontendIPChanged(clusterName string, config network.Frontend
 func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) {
 	isInternal := requiresInternalLoadBalancer(service)
 	serviceName := getServiceName(service)
-	glog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb)
+	klog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb)
 	lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb)
 	if err != nil {
-		glog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err)
+		klog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err)
 		return nil, err
 	}
 	lbName := *lb.Name
-	glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
+	klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
 	lbFrontendIPConfigName := az.getFrontendIPConfigName(service, subnet(service))
 	lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
 	lbBackendPoolName := getBackendPoolName(clusterName)
@@ -600,18 +600,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	foundBackendPool := false
 	for _, bp := range newBackendPools {
 		if strings.EqualFold(*bp.Name, lbBackendPoolName) {
-			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
 			foundBackendPool = true
 			break
 		} else {
-			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
 		}
 	}
 	if !foundBackendPool {
 		newBackendPools = append(newBackendPools, network.BackendAddressPool{
 			Name: to.StringPtr(lbBackendPoolName),
 		})
-		glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)
+		klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)

 		dirtyLb = true
 		lb.BackendAddressPools = &newBackendPools
@@ -629,7 +629,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 		for i := len(newConfigs) - 1; i >= 0; i-- {
 			config := newConfigs[i]
 			if az.serviceOwnsFrontendIP(config, service) {
-				glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
+				klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
 				newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
 				dirtyConfigs = true
 			}
@@ -642,7 +642,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 				return nil, err
 			}
 			if isFipChanged {
-				glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
+				klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
 				newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
 				dirtyConfigs = true
 			}
@@ -705,7 +705,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 				Name: to.StringPtr(lbFrontendIPConfigName),
 				FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties,
 			})
-			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
 			dirtyConfigs = true
 		}
 	}
@@ -726,15 +726,15 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	for i := len(updatedProbes) - 1; i >= 0; i-- {
 		existingProbe := updatedProbes[i]
 		if az.serviceOwnsRule(service, *existingProbe.Name) {
-			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
 			keepProbe := false
 			if findProbe(expectedProbes, existingProbe) {
-				glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
+				klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
 				keepProbe = true
 			}
 			if !keepProbe {
 				updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...)
-				glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
+				klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
 				dirtyProbes = true
 			}
 		}
@@ -743,11 +743,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	for _, expectedProbe := range expectedProbes {
 		foundProbe := false
 		if findProbe(updatedProbes, expectedProbe) {
-			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
 			foundProbe = true
 		}
 		if !foundProbe {
-			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
 			updatedProbes = append(updatedProbes, expectedProbe)
 			dirtyProbes = true
 		}
@@ -768,13 +768,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 		existingRule := updatedRules[i]
 		if az.serviceOwnsRule(service, *existingRule.Name) {
 			keepRule := false
-			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
 			if findRule(expectedRules, existingRule) {
-				glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
+				klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
 				keepRule = true
 			}
 			if !keepRule {
-				glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
+				klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
 				updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
 				dirtyRules = true
 			}
@@ -784,11 +784,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	for _, expectedRule := range expectedRules {
 		foundRule := false
 		if findRule(updatedRules, expectedRule) {
-			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
 			foundRule = true
 		}
 		if !foundRule {
-			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
 			updatedRules = append(updatedRules, expectedRule)
 			dirtyRules = true
 		}
@@ -805,31 +805,31 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 {
 		// When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself,
 		// because an Azure load balancer cannot have an empty FrontendIPConfigurations collection
-		glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
+		klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)

 		// Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
 		vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
-		glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName)
+		klog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName)
 		err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
 		if err != nil {
-			glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err)
+			klog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err)
 			return nil, err
 		}
-		glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName)
+		klog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName)

 		// Remove the LB.
-		glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName)
+		klog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName)
 		err = az.DeleteLBWithRetry(service, lbName)
 		if err != nil {
-			glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
+			klog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
 			return nil, err
 		}
-		glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName)
+		klog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName)
 	} else {
-		glog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName)
+		klog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName)
 		err := az.CreateOrUpdateLBWithRetry(service, *lb)
 		if err != nil {
-			glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
+			klog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
 			return nil, err
 		}

@@ -837,7 +837,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 		// Refresh updated lb which will be used later in other places.
 		newLB, exist, err := az.getAzureLoadBalancer(lbName)
 		if err != nil {
-			glog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
+			klog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
 			return nil, err
 		}
 		if !exist {
@@ -857,7 +857,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 		}
 	}

-	glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName)
+	klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName)
 	return lb, nil
 }

@@ -881,7 +881,7 @@ func (az *Cloud) reconcileLoadBalancerRule(
 	for _, port := range ports {
 		lbRuleName := az.getLoadBalancerRuleName(service, port, subnet(service))

-		glog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName)
+		klog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName)

 		transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
 		if err != nil {
@@ -956,12 +956,12 @@ func (az *Cloud) reconcileLoadBalancerRule(
 // This entails adding required, missing SecurityRules and removing stale rules.
 func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) {
 	serviceName := getServiceName(service)
-	glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName)
+	klog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName)

 	ports := service.Spec.Ports
 	if ports == nil {
 		if useSharedSecurityRule(service) {
-			glog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name)
+			klog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name)
 			return nil, fmt.Errorf("No port info for reconciling shared rule for service %s", service.Name)
 		}
 		ports = []v1.ServicePort{}
@@ -1034,7 +1034,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 	}

 	for _, r := range expectedSecurityRules {
-		glog.V(10).Infof("Expecting security rule for %s: %s:%s -> %s:%s", service.Name, *r.SourceAddressPrefix, *r.SourcePortRange, *r.DestinationAddressPrefix, *r.DestinationPortRange)
+		klog.V(10).Infof("Expecting security rule for %s: %s:%s -> %s:%s", service.Name, *r.SourceAddressPrefix, *r.SourcePortRange, *r.DestinationAddressPrefix, *r.DestinationPortRange)
 	}

 	// update security rules
@@ -1045,7 +1045,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 	}

 	for _, r := range updatedRules {
-		glog.V(10).Infof("Existing security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange))
+		klog.V(10).Infof("Existing security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange))
 	}

 	// update security rules: remove unwanted rules that belong privately
@@ -1053,14 +1053,14 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 	for i := len(updatedRules) - 1; i >= 0; i-- {
 		existingRule := updatedRules[i]
 		if az.serviceOwnsRule(service, *existingRule.Name) {
-			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
+			klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
 			keepRule := false
 			if findSecurityRule(expectedSecurityRules, existingRule) {
-				glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
+				klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
 				keepRule = true
 			}
 			if !keepRule {
-				glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
+				klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
 				updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
 				dirtySg = true
 			}
@@ -1074,17 +1074,17 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 			sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix)
 			sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName)
 			if !sharedRuleFound {
-				glog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
+				klog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
 				return nil, fmt.Errorf("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
 			}
 			if sharedRule.DestinationAddressPrefixes == nil {
-				glog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
+				klog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
 				return nil, fmt.Errorf("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
 			}
 			existingPrefixes := *sharedRule.DestinationAddressPrefixes
 			addressIndex, found := findIndex(existingPrefixes, destinationIPAddress)
 			if !found {
-				glog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
+				klog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
 				return nil, fmt.Errorf("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
 			}
 			if len(existingPrefixes) == 1 {
@@ -1114,7 +1114,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 	for _, expectedRule := range expectedSecurityRules {
 		foundRule := false
 		if findSecurityRule(updatedRules, expectedRule) {
-			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
+			klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
 			foundRule = true
 		}
 		if foundRule && allowsConsolidation(expectedRule) {
@@ -1123,7 +1123,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 			dirtySg = true
 		}
 		if !foundRule {
-			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name)
+			klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name)

 			nextAvailablePriority, err := getNextAvailablePriority(updatedRules)
 			if err != nil {
@@ -1137,16 +1137,16 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 	}

 	for _, r := range updatedRules {
-		glog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange))
+		klog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange))
 	}

 	if dirtySg {
 		sg.SecurityRules = &updatedRules
-		glog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name)
-		glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name)
+		klog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name)
+		klog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name)
 		err := az.CreateOrUpdateSGWithRetry(service, sg)
 		if err != nil {
-			glog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name)
+			klog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name)
 			// TODO (Nov 2017): remove when augmented security rules are out of preview
 			// we could try to parse the response but it's not worth it for bridging a preview
 			errorDescription := err.Error()
@@ -1157,7 +1157,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 			// END TODO
 			return nil, err
 		}
-		glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): end", *sg.Name)
+		klog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): end", *sg.Name)
 	}
 	return &sg, nil
 }
@@ -1331,13 +1331,13 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lb *
 			// This is the only case we should preserve the
 			// Public ip resource with match service tag
 		} else {
-			glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName)
+			klog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName)
 			err := az.safeDeletePublicIP(service, pipResourceGroup, &pip, lb)
 			if err != nil {
-				glog.Errorf("safeDeletePublicIP(%s) failed with error: %v", pipName, err)
+				klog.Errorf("safeDeletePublicIP(%s) failed with error: %v", pipName, err)
 				return nil, err
 			}
-			glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName)
+			klog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName)
 		}
 	}

@@ -1416,21 +1416,21 @@ func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string
 		if frontendIPConfigUpdated || loadBalancerRuleUpdated {
 			err := az.CreateOrUpdateLBWithRetry(service, *lb)
 			if err != nil {
-				glog.Errorf("safeDeletePublicIP for service(%s) failed with error: %v", getServiceName(service), err)
+				klog.Errorf("safeDeletePublicIP for service(%s) failed with error: %v", getServiceName(service), err)
 				return err
 			}
 		}
 	}

 	pipName := to.String(pip.Name)
-	glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName)
+	klog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName)
 	err := az.DeletePublicIPWithRetry(service, pipResourceGroup, pipName)
 	if err != nil {
 		if err = ignoreStatusNotFoundFromError(err); err != nil {
 			return err
 		}
 	}
-	glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): end", pipResourceGroup, pipName)
+	klog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): end", pipResourceGroup, pipName)

 	return nil
 }