Staging the legacy Azure Cloud Provider

Signed-off-by: Stephen Augustus <saugustus@vmware.com>
Stephen Augustus
2019-04-24 21:54:58 -04:00
parent a7a6bf4386
commit 2f74c90480
63 changed files with 61 additions and 40 deletions


@@ -14,12 +14,12 @@ go_library(
],
deps = [
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/cloudprovider/providers/cloudstack:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/cloudprovider/providers/openstack:go_default_library",
"//pkg/cloudprovider/providers/ovirt:go_default_library",
"//pkg/cloudprovider/providers/photon:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/azure:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/vsphere:go_default_library",
],
)
@@ -36,7 +36,6 @@ filegroup(
srcs = [
":package-srcs",
"//pkg/cloudprovider/providers/aws:all-srcs",
"//pkg/cloudprovider/providers/azure:all-srcs",
"//pkg/cloudprovider/providers/cloudstack:all-srcs",
"//pkg/cloudprovider/providers/fake:all-srcs",
"//pkg/cloudprovider/providers/gce:all-srcs",


@@ -1,128 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"azure.go",
"azure_backoff.go",
"azure_blobDiskController.go",
"azure_cache.go",
"azure_client.go",
"azure_controller_common.go",
"azure_controller_standard.go",
"azure_controller_vmss.go",
"azure_fakes.go",
"azure_file.go",
"azure_instance_metadata.go",
"azure_instances.go",
"azure_loadbalancer.go",
"azure_managedDiskController.go",
"azure_metrics.go",
"azure_routes.go",
"azure_standard.go",
"azure_storage.go",
"azure_storageaccount.go",
"azure_vmsets.go",
"azure_vmss.go",
"azure_vmss_cache.go",
"azure_wrap.go",
"azure_zones.go",
],
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure",
deps = [
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/pkg/version:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/errors:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/keymutex:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"azure_backoff_test.go",
"azure_cache_test.go",
"azure_controller_common_test.go",
"azure_instances_test.go",
"azure_loadbalancer_test.go",
"azure_metrics_test.go",
"azure_routes_test.go",
"azure_standard_test.go",
"azure_storage_test.go",
"azure_storageaccount_test.go",
"azure_test.go",
"azure_vmss_cache_test.go",
"azure_vmss_test.go",
"azure_wrap_test.go",
"azure_zones_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/cloudprovider/providers/azure/auth:all-srcs",
],
tags = ["automanaged"],
)


@@ -1,15 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- andyzhangx
- brendandburns
- feiskyer
- karataliu
- khenidak
reviewers:
- andyzhangx
- brendandburns
- feiskyer
- justaugustus
- karataliu
- khenidak


@@ -1,28 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["azure_auth.go"],
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
"//vendor/golang.org/x/crypto/pkcs12:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,135 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"crypto/rsa"
"crypto/x509"
"fmt"
"io/ioutil"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"golang.org/x/crypto/pkcs12"
"k8s.io/klog"
)
// AzureAuthConfig holds the auth-related part of the cloud config
type AzureAuthConfig struct {
// The cloud environment identifier. Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
Cloud string `json:"cloud" yaml:"cloud"`
// The AAD Tenant ID for the Subscription that the cluster is deployed in
TenantID string `json:"tenantId" yaml:"tenantId"`
// The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
AADClientID string `json:"aadClientId" yaml:"aadClientId"`
// The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
// The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs
AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"`
// The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
// Use managed service identity for the virtual machine to access Azure ARM APIs
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension" yaml:"useManagedIdentityExtension"`
// UserAssignedIdentityID contains the Client ID of the user assigned MSI which is assigned to the underlying VMs. If empty the user assigned identity is not used.
// More details of the user assigned identity can be found at: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview
// For the user assigned identity specified here to be used, the UseManagedIdentityExtension has to be set to true.
UserAssignedIdentityID string `json:"userAssignedIdentityID" yaml:"userAssignedIdentityID"`
// The ID of the Azure Subscription that the cluster is deployed in
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
}
// GetServicePrincipalToken creates a new service principal token based on the configuration
func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
if config.UseManagedIdentityExtension {
klog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
msiEndpoint, err := adal.GetMSIVMEndpoint()
if err != nil {
return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
}
if len(config.UserAssignedIdentityID) > 0 {
klog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token")
return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint,
env.ServiceManagementEndpoint,
config.UserAssignedIdentityID)
}
klog.V(4).Info("azure: using System Assigned MSI to retrieve access token")
return adal.NewServicePrincipalTokenFromMSI(
msiEndpoint,
env.ServiceManagementEndpoint)
}
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
if err != nil {
return nil, fmt.Errorf("creating the OAuth config: %v", err)
}
if len(config.AADClientSecret) > 0 {
klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
return adal.NewServicePrincipalToken(
*oauthConfig,
config.AADClientID,
config.AADClientSecret,
env.ServiceManagementEndpoint)
}
if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
certData, err := ioutil.ReadFile(config.AADClientCertPath)
if err != nil {
return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
}
certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
if err != nil {
return nil, fmt.Errorf("decoding the client certificate: %v", err)
}
return adal.NewServicePrincipalTokenFromCertificate(
*oauthConfig,
config.AADClientID,
certificate,
privateKey,
env.ServiceManagementEndpoint)
}
return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID)
}
// ParseAzureEnvironment returns azure environment by name
func ParseAzureEnvironment(cloudName string) (*azure.Environment, error) {
var env azure.Environment
var err error
if cloudName == "" {
env = azure.PublicCloud
} else {
env, err = azure.EnvironmentFromName(cloudName)
}
return &env, err
}
// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
// the private RSA key
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
privateKey, certificate, err := pkcs12.Decode(pkcs, password)
if err != nil {
return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err)
}
rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
if !isRsaKey {
return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key")
}
return certificate, rsaPrivateKey, nil
}
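
For context, a minimal standalone sketch (not part of this commit) of how the helpers above fit together: ParseAzureEnvironment resolves the cloud environment, and GetServicePrincipalToken then picks a credential source in order (managed identity, then client secret, then client certificate). The import path assumes the post-staging location k8s.io/legacy-cloud-providers/azure/auth introduced by this change, and all credential values are placeholders.

package main

import (
	"fmt"

	"k8s.io/legacy-cloud-providers/azure/auth"
)

func main() {
	// Hypothetical credentials for illustration only.
	cfg := &auth.AzureAuthConfig{
		Cloud:           "AzurePublicCloud",
		TenantID:        "<tenant-id>",
		AADClientID:     "<client-id>",
		AADClientSecret: "<client-secret>",
	}

	// Empty or recognized cloud names resolve to an azure.Environment.
	env, err := auth.ParseAzureEnvironment(cfg.Cloud)
	if err != nil {
		panic(err)
	}

	// With a client secret set (and no managed identity), this takes the
	// client_id+client_secret branch of GetServicePrincipalToken.
	token, err := auth.GetServicePrincipalToken(cfg, env)
	if err != nil {
		panic(err)
	}
	fmt.Printf("acquired token for resource %s\n", env.ServiceManagementEndpoint)
	_ = token
}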


@@ -1,714 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"io"
"io/ioutil"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/pkg/version"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"k8s.io/klog"
"sigs.k8s.io/yaml"
)
const (
// CloudProviderName is the value used for the --cloud-provider flag
CloudProviderName = "azure"
rateLimitQPSDefault = 1.0
rateLimitBucketDefault = 5
backoffRetriesDefault = 6
backoffExponentDefault = 1.5
backoffDurationDefault = 5 // in seconds
backoffJitterDefault = 1.0
// According to https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#load-balancer.
maximumLoadBalancerRuleCount = 250
vmTypeVMSS = "vmss"
vmTypeStandard = "standard"
backoffModeDefault = "default"
backoffModeV2 = "v2"
loadBalancerSkuBasic = "basic"
loadBalancerSkuStandard = "standard"
externalResourceGroupLabel = "kubernetes.azure.com/resource-group"
managedByAzureLabel = "kubernetes.azure.com/managed"
)
var (
// Master nodes are not added to standard load balancer by default.
defaultExcludeMasterFromStandardLB = true
// Outbound SNAT is enabled by default.
defaultDisableOutboundSNAT = false
)
// Config holds the configuration parsed from the --cloud-config flag
// All fields are required unless otherwise specified
type Config struct {
auth.AzureAuthConfig
// The name of the resource group that the cluster is deployed in
ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"`
// The location of the resource group that the cluster is deployed in
Location string `json:"location" yaml:"location"`
// The name of the VNet that the cluster is deployed in
VnetName string `json:"vnetName" yaml:"vnetName"`
// The name of the resource group that the Vnet is deployed in
VnetResourceGroup string `json:"vnetResourceGroup" yaml:"vnetResourceGroup"`
// The name of the subnet that the cluster is deployed in
SubnetName string `json:"subnetName" yaml:"subnetName"`
// The name of the security group attached to the cluster's subnet
SecurityGroupName string `json:"securityGroupName" yaml:"securityGroupName"`
// (Optional in 1.6) The name of the route table attached to the subnet that the cluster is deployed in
RouteTableName string `json:"routeTableName" yaml:"routeTableName"`
// The name of the resource group that the RouteTable is deployed in
RouteTableResourceGroup string `json:"routeTableResourceGroup" yaml:"routeTableResourceGroup"`
// (Optional) The name of the availability set that should be used as the load balancer backend
// If this is set, the Azure cloudprovider will only add nodes from that availability set to the load
// balancer backend pool. If this is not set, and multiple agent pools (availability sets) are used, then
// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
// In other words, if you use multiple agent pools (availability sets), you MUST set this field.
PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"`
// The type of azure nodes. Candidate values are: vmss and standard.
// If not set, it defaults to standard.
VMType string `json:"vmType" yaml:"vmType"`
// The name of the scale set that should be used as the load balancer backend.
// If this is set, the Azure cloudprovider will only add nodes from that scale set to the load
// balancer backend pool. If this is not set, and multiple agent pools (scale sets) are used, then
// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
// In other words, if you use multiple agent pools (scale sets), you MUST set this field.
PrimaryScaleSetName string `json:"primaryScaleSetName" yaml:"primaryScaleSetName"`
// Enable exponential backoff to manage resource request retries
CloudProviderBackoff bool `json:"cloudProviderBackoff" yaml:"cloudProviderBackoff"`
// Backoff retry limit
CloudProviderBackoffRetries int `json:"cloudProviderBackoffRetries" yaml:"cloudProviderBackoffRetries"`
// Backoff exponent
CloudProviderBackoffExponent float64 `json:"cloudProviderBackoffExponent" yaml:"cloudProviderBackoffExponent"`
// Backoff duration
CloudProviderBackoffDuration int `json:"cloudProviderBackoffDuration" yaml:"cloudProviderBackoffDuration"`
// Backoff jitter
CloudProviderBackoffJitter float64 `json:"cloudProviderBackoffJitter" yaml:"cloudProviderBackoffJitter"`
// Backoff mode, options are v2 and default.
// * default means two-layer backoff retrying, one in the cloud provider and the other in the Azure SDK.
// * v2 means only backoff in the Azure SDK is used. In such mode, CloudProviderBackoffDuration and
// CloudProviderBackoffJitter are omitted.
// "default" will be used if not specified.
CloudProviderBackoffMode string `json:"cloudProviderBackoffMode" yaml:"cloudProviderBackoffMode"`
// Enable rate limiting
CloudProviderRateLimit bool `json:"cloudProviderRateLimit" yaml:"cloudProviderRateLimit"`
// Rate limit QPS (Read)
CloudProviderRateLimitQPS float32 `json:"cloudProviderRateLimitQPS" yaml:"cloudProviderRateLimitQPS"`
// Rate limit Bucket Size
CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket" yaml:"cloudProviderRateLimitBucket"`
// Rate limit QPS (Write)
CloudProviderRateLimitQPSWrite float32 `json:"cloudProviderRateLimitQPSWrite" yaml:"cloudProviderRateLimitQPSWrite"`
// Rate limit Bucket Size
CloudProviderRateLimitBucketWrite int `json:"cloudProviderRateLimitBucketWrite" yaml:"cloudProviderRateLimitBucketWrite"`
// Use instance metadata service where possible
UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"`
// Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
// If not set, it defaults to basic.
LoadBalancerSku string `json:"loadBalancerSku" yaml:"loadBalancerSku"`
// ExcludeMasterFromStandardLB excludes master nodes from standard load balancer.
// If not set, it defaults to true.
ExcludeMasterFromStandardLB *bool `json:"excludeMasterFromStandardLB" yaml:"excludeMasterFromStandardLB"`
// DisableOutboundSNAT disables the outbound SNAT for public load balancer rules.
// It should only be set when loadBalancerSku is standard. If not set, it defaults to false.
DisableOutboundSNAT *bool `json:"disableOutboundSNAT" yaml:"disableOutboundSNAT"`
// Maximum allowed LoadBalancer Rule Count is the limit enforced by Azure Load balancer
MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount" yaml:"maximumLoadBalancerRuleCount"`
}
var _ cloudprovider.Interface = (*Cloud)(nil)
var _ cloudprovider.Instances = (*Cloud)(nil)
var _ cloudprovider.LoadBalancer = (*Cloud)(nil)
var _ cloudprovider.Routes = (*Cloud)(nil)
var _ cloudprovider.Zones = (*Cloud)(nil)
var _ cloudprovider.PVLabeler = (*Cloud)(nil)
// Cloud holds the config and clients
type Cloud struct {
Config
Environment azure.Environment
RoutesClient RoutesClient
SubnetsClient SubnetsClient
InterfacesClient InterfacesClient
RouteTablesClient RouteTablesClient
LoadBalancerClient LoadBalancersClient
PublicIPAddressesClient PublicIPAddressesClient
SecurityGroupsClient SecurityGroupsClient
VirtualMachinesClient VirtualMachinesClient
StorageAccountClient StorageAccountClient
DisksClient DisksClient
SnapshotsClient *compute.SnapshotsClient
FileClient FileClient
resourceRequestBackoff wait.Backoff
metadata *InstanceMetadataService
vmSet VMSet
// Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes.
nodeCachesLock sync.Mutex
// nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone
// it is updated by the nodeInformer
nodeZones map[string]sets.String
// nodeResourceGroups holds nodes external resource groups
nodeResourceGroups map[string]string
// unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
unmanagedNodes sets.String
// nodeInformerSynced is for determining if the informer has synced.
nodeInformerSynced cache.InformerSynced
// routeCIDRsLock holds lock for routeCIDRs cache.
routeCIDRsLock sync.Mutex
// routeCIDRs holds cache for route CIDRs.
routeCIDRs map[string]string
// Clients for vmss.
VirtualMachineScaleSetsClient VirtualMachineScaleSetsClient
VirtualMachineScaleSetVMsClient VirtualMachineScaleSetVMsClient
// client for vm sizes list
VirtualMachineSizesClient VirtualMachineSizesClient
kubeClient clientset.Interface
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
vmCache *timedCache
lbCache *timedCache
nsgCache *timedCache
rtCache *timedCache
*BlobDiskController
*ManagedDiskController
*controllerCommon
}
func init() {
cloudprovider.RegisterCloudProvider(CloudProviderName, NewCloud)
}
// NewCloud returns a Cloud with initialized clients
func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
config, err := parseConfig(configReader)
if err != nil {
return nil, err
}
if config.RouteTableResourceGroup == "" {
config.RouteTableResourceGroup = config.ResourceGroup
}
if config.VMType == "" {
// default to standard vmType if not set.
config.VMType = vmTypeStandard
}
env, err := auth.ParseAzureEnvironment(config.Cloud)
if err != nil {
return nil, err
}
servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env)
if err != nil {
return nil, err
}
// operationPollRateLimiter.Accept() is a no-op if rate limits are configured off.
operationPollRateLimiter := flowcontrol.NewFakeAlwaysRateLimiter()
operationPollRateLimiterWrite := flowcontrol.NewFakeAlwaysRateLimiter()
// If reader is provided (and no writer) we will
// use the same value for both.
if config.CloudProviderRateLimit {
// Assign rate limit defaults if no configuration was passed in
if config.CloudProviderRateLimitQPS == 0 {
config.CloudProviderRateLimitQPS = rateLimitQPSDefault
}
if config.CloudProviderRateLimitBucket == 0 {
config.CloudProviderRateLimitBucket = rateLimitBucketDefault
}
if config.CloudProviderRateLimitQPSWrite == 0 {
config.CloudProviderRateLimitQPSWrite = rateLimitQPSDefault
}
if config.CloudProviderRateLimitBucketWrite == 0 {
config.CloudProviderRateLimitBucketWrite = rateLimitBucketDefault
}
operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(
config.CloudProviderRateLimitQPS,
config.CloudProviderRateLimitBucket)
operationPollRateLimiterWrite = flowcontrol.NewTokenBucketRateLimiter(
config.CloudProviderRateLimitQPSWrite,
config.CloudProviderRateLimitBucketWrite)
klog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d",
config.CloudProviderRateLimitQPS,
config.CloudProviderRateLimitBucket)
klog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
config.CloudProviderRateLimitQPSWrite,
config.CloudProviderRateLimitBucketWrite)
}
// Conditionally configure resource request backoff
resourceRequestBackoff := wait.Backoff{
Steps: 1,
}
if config.CloudProviderBackoff {
// Assign backoff defaults if no configuration was passed in
if config.CloudProviderBackoffRetries == 0 {
config.CloudProviderBackoffRetries = backoffRetriesDefault
}
if config.CloudProviderBackoffDuration == 0 {
config.CloudProviderBackoffDuration = backoffDurationDefault
}
if config.CloudProviderBackoffExponent == 0 {
config.CloudProviderBackoffExponent = backoffExponentDefault
} else if config.shouldOmitCloudProviderBackoff() {
klog.Warning("Azure cloud provider config 'cloudProviderBackoffExponent' has been deprecated for 'v2' backoff mode. 2 is always used as the backoff exponent.")
}
if config.CloudProviderBackoffJitter == 0 {
config.CloudProviderBackoffJitter = backoffJitterDefault
} else if config.shouldOmitCloudProviderBackoff() {
klog.Warning("Azure cloud provider config 'cloudProviderBackoffJitter' has been deprecated for 'v2' backoff mode.")
}
if !config.shouldOmitCloudProviderBackoff() {
resourceRequestBackoff = wait.Backoff{
Steps: config.CloudProviderBackoffRetries,
Factor: config.CloudProviderBackoffExponent,
Duration: time.Duration(config.CloudProviderBackoffDuration) * time.Second,
Jitter: config.CloudProviderBackoffJitter,
}
}
klog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
config.CloudProviderBackoffRetries,
config.CloudProviderBackoffExponent,
config.CloudProviderBackoffDuration,
config.CloudProviderBackoffJitter)
} else {
// CloudProviderBackoffRetries will be set to 1 by default per the requirements of the Azure SDK.
config.CloudProviderBackoffRetries = 1
config.CloudProviderBackoffDuration = backoffDurationDefault
}
if strings.EqualFold(config.LoadBalancerSku, loadBalancerSkuStandard) {
// Do not add master nodes to standard LB by default.
if config.ExcludeMasterFromStandardLB == nil {
config.ExcludeMasterFromStandardLB = &defaultExcludeMasterFromStandardLB
}
// Enable outbound SNAT by default.
if config.DisableOutboundSNAT == nil {
config.DisableOutboundSNAT = &defaultDisableOutboundSNAT
}
} else {
if config.DisableOutboundSNAT != nil && *config.DisableOutboundSNAT {
return nil, fmt.Errorf("disableOutboundSNAT should only set when loadBalancerSku is standard")
}
}
azClientConfig := &azClientConfig{
subscriptionID: config.SubscriptionID,
resourceManagerEndpoint: env.ResourceManagerEndpoint,
servicePrincipalToken: servicePrincipalToken,
rateLimiterReader: operationPollRateLimiter,
rateLimiterWriter: operationPollRateLimiterWrite,
CloudProviderBackoffRetries: config.CloudProviderBackoffRetries,
CloudProviderBackoffDuration: config.CloudProviderBackoffDuration,
ShouldOmitCloudProviderBackoff: config.shouldOmitCloudProviderBackoff(),
}
az := Cloud{
Config: *config,
Environment: *env,
nodeZones: map[string]sets.String{},
nodeResourceGroups: map[string]string{},
unmanagedNodes: sets.NewString(),
routeCIDRs: map[string]string{},
resourceRequestBackoff: resourceRequestBackoff,
DisksClient: newAzDisksClient(azClientConfig),
SnapshotsClient: newSnapshotsClient(azClientConfig),
RoutesClient: newAzRoutesClient(azClientConfig),
SubnetsClient: newAzSubnetsClient(azClientConfig),
InterfacesClient: newAzInterfacesClient(azClientConfig),
RouteTablesClient: newAzRouteTablesClient(azClientConfig),
LoadBalancerClient: newAzLoadBalancersClient(azClientConfig),
SecurityGroupsClient: newAzSecurityGroupsClient(azClientConfig),
StorageAccountClient: newAzStorageAccountClient(azClientConfig),
VirtualMachinesClient: newAzVirtualMachinesClient(azClientConfig),
PublicIPAddressesClient: newAzPublicIPAddressesClient(azClientConfig),
VirtualMachineSizesClient: newAzVirtualMachineSizesClient(azClientConfig),
VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(azClientConfig),
VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(azClientConfig),
FileClient: &azureFileClient{env: *env},
}
az.metadata, err = NewInstanceMetadataService(metadataURL)
if err != nil {
return nil, err
}
if az.MaximumLoadBalancerRuleCount == 0 {
az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount
}
if strings.EqualFold(vmTypeVMSS, az.Config.VMType) {
az.vmSet, err = newScaleSet(&az)
if err != nil {
return nil, err
}
} else {
az.vmSet = newAvailabilitySet(&az)
}
az.vmCache, err = az.newVMCache()
if err != nil {
return nil, err
}
az.lbCache, err = az.newLBCache()
if err != nil {
return nil, err
}
az.nsgCache, err = az.newNSGCache()
if err != nil {
return nil, err
}
az.rtCache, err = az.newRouteTableCache()
if err != nil {
return nil, err
}
if err := initDiskControllers(&az); err != nil {
return nil, err
}
return &az, nil
}
// parseConfig returns a parsed configuration for an Azure cloudprovider config file
func parseConfig(configReader io.Reader) (*Config, error) {
var config Config
if configReader == nil {
return &config, nil
}
configContents, err := ioutil.ReadAll(configReader)
if err != nil {
return nil, err
}
err = yaml.Unmarshal(configContents, &config)
if err != nil {
return nil, err
}
// The resource group name may be in different cases from different Azure APIs, hence it is converted to lowercase here.
// See more context at https://github.com/kubernetes/kubernetes/issues/71994.
config.ResourceGroup = strings.ToLower(config.ResourceGroup)
return &config, nil
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
az.kubeClient = clientBuilder.ClientOrDie("azure-cloud-provider")
az.eventBroadcaster = record.NewBroadcaster()
az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.kubeClient.CoreV1().Events("")})
az.eventRecorder = az.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "azure-cloud-provider"})
}
// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
return az, true
}
// Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.
func (az *Cloud) Instances() (cloudprovider.Instances, bool) {
return az, true
}
// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.
func (az *Cloud) Zones() (cloudprovider.Zones, bool) {
return az, true
}
// Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.
func (az *Cloud) Clusters() (cloudprovider.Clusters, bool) {
return nil, false
}
// Routes returns a routes interface along with whether the interface is supported.
func (az *Cloud) Routes() (cloudprovider.Routes, bool) {
return az, true
}
// HasClusterID returns true if the cluster has a clusterID
func (az *Cloud) HasClusterID() bool {
return true
}
// ProviderName returns the cloud provider ID.
func (az *Cloud) ProviderName() string {
return CloudProviderName
}
// configureUserAgent configures the autorest client with a user agent that
// includes "kubernetes" and the full kubernetes git version string
// example:
// Azure-SDK-for-Go/7.0.1-beta arm-network/2016-09-01; kubernetes-cloudprovider/v1.7.0-alpha.2.711+a2fadef8170bb0-dirty;
func configureUserAgent(client *autorest.Client) {
k8sVersion := version.Get().GitVersion
client.UserAgent = fmt.Sprintf("%s; kubernetes-cloudprovider/%s", client.UserAgent, k8sVersion)
}
func initDiskControllers(az *Cloud) error {
// Common controller contains the function
// needed by both blob disk and managed disk controllers
common := &controllerCommon{
location: az.Location,
storageEndpointSuffix: az.Environment.StorageEndpointSuffix,
resourceGroup: az.ResourceGroup,
subscriptionID: az.SubscriptionID,
cloud: az,
}
az.BlobDiskController = &BlobDiskController{common: common}
az.ManagedDiskController = &ManagedDiskController{common: common}
az.controllerCommon = common
return nil
}
// SetInformers sets informers for Azure cloud provider.
func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
klog.Infof("Setting up informers for Azure cloud provider")
nodeInformer := informerFactory.Core().V1().Nodes().Informer()
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
node := obj.(*v1.Node)
az.updateNodeCaches(nil, node)
},
UpdateFunc: func(prev, obj interface{}) {
prevNode := prev.(*v1.Node)
newNode := obj.(*v1.Node)
if newNode.Labels[v1.LabelZoneFailureDomain] ==
prevNode.Labels[v1.LabelZoneFailureDomain] {
return
}
az.updateNodeCaches(prevNode, newNode)
},
DeleteFunc: func(obj interface{}) {
node, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *v1.Node here
// and we need to handle that correctly.
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
}
az.updateNodeCaches(node, nil)
},
})
az.nodeInformerSynced = nodeInformer.HasSynced
}
// updateNodeCaches updates local cache for node's zones and external resource groups.
func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if prevNode != nil {
// Remove from nodeZones cache.
prevZone, ok := prevNode.ObjectMeta.Labels[v1.LabelZoneFailureDomain]
if ok && az.isAvailabilityZone(prevZone) {
az.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
if az.nodeZones[prevZone].Len() == 0 {
az.nodeZones[prevZone] = nil
}
}
// Remove from nodeResourceGroups cache.
_, ok = prevNode.ObjectMeta.Labels[externalResourceGroupLabel]
if ok {
delete(az.nodeResourceGroups, prevNode.ObjectMeta.Name)
}
// Remove from unmanagedNodes cache.
managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" {
az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
}
}
if newNode != nil {
// Add to nodeZones cache.
newZone, ok := newNode.ObjectMeta.Labels[v1.LabelZoneFailureDomain]
if ok && az.isAvailabilityZone(newZone) {
if az.nodeZones[newZone] == nil {
az.nodeZones[newZone] = sets.NewString()
}
az.nodeZones[newZone].Insert(newNode.ObjectMeta.Name)
}
// Add to nodeResourceGroups cache.
newRG, ok := newNode.ObjectMeta.Labels[externalResourceGroupLabel]
if ok && len(newRG) > 0 {
az.nodeResourceGroups[newNode.ObjectMeta.Name] = strings.ToLower(newRG)
}
// Add to unmanagedNodes cache.
managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" {
az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
}
}
}
// GetActiveZones returns all the zones in which k8s nodes are currently running.
func (az *Cloud) GetActiveZones() (sets.String, error) {
if az.nodeInformerSynced == nil {
return nil, fmt.Errorf("Azure cloud provider doesn't have informers set")
}
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetActiveZones")
}
zones := sets.NewString()
for zone, nodes := range az.nodeZones {
if len(nodes) > 0 {
zones.Insert(zone)
}
}
return zones, nil
}
// GetLocation returns the location in which k8s cluster is currently running.
func (az *Cloud) GetLocation() string {
return az.Location
}
// GetNodeResourceGroup gets resource group for given node.
func (az *Cloud) GetNodeResourceGroup(nodeName string) (string, error) {
// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
if az.nodeInformerSynced == nil {
return az.ResourceGroup, nil
}
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return "", fmt.Errorf("node informer is not synced when trying to GetNodeResourceGroup")
}
// Return external resource group if it has been cached.
if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok {
return cachedRG, nil
}
// Return resource group from cloud provider options.
return az.ResourceGroup, nil
}
// GetResourceGroups returns a set of resource groups that all nodes are running on.
func (az *Cloud) GetResourceGroups() (sets.String, error) {
// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
if az.nodeInformerSynced == nil {
return sets.NewString(az.ResourceGroup), nil
}
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetResourceGroups")
}
resourceGroups := sets.NewString(az.ResourceGroup)
for _, rg := range az.nodeResourceGroups {
resourceGroups.Insert(rg)
}
return resourceGroups, nil
}
// GetUnmanagedNodes returns a list of nodes not managed by Azure cloud provider (e.g. on-prem nodes).
func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
// Kubelet won't set az.nodeInformerSynced, always return nil.
if az.nodeInformerSynced == nil {
return nil, nil
}
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes")
}
return sets.NewString(az.unmanagedNodes.List()...), nil
}
// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged or in external resource group.
func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(node *v1.Node) bool {
labels := node.ObjectMeta.Labels
if rg, ok := labels[externalResourceGroupLabel]; ok && !strings.EqualFold(rg, az.ResourceGroup) {
return true
}
if managed, ok := labels[managedByAzureLabel]; ok && managed == "false" {
return true
}
return false
}
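
As a rough illustration (not part of this commit) of how the provider above is wired up, the sketch below builds a Cloud directly through NewCloud from an in-memory cloud config. The field names come from the Config and AzureAuthConfig structs shown here; every value is a placeholder, the import path assumes the post-staging location k8s.io/legacy-cloud-providers/azure, and NewCloud will attempt to reach Azure, so this only demonstrates the plumbing.

package main

import (
	"strings"

	"k8s.io/legacy-cloud-providers/azure"
)

func main() {
	// Placeholder cloud config; keys match the yaml tags on Config/AzureAuthConfig.
	cloudConfig := `
tenantId: "<tenant-id>"
subscriptionId: "<subscription-id>"
aadClientId: "<client-id>"
aadClientSecret: "<client-secret>"
resourceGroup: "my-resource-group"
location: "eastus"
vmType: "vmss"
loadBalancerSku: "standard"
useInstanceMetadata: true
`
	// NewCloud parses the config, applies the defaults described above, and
	// constructs the Azure clients.
	provider, err := azure.NewCloud(strings.NewReader(cloudConfig))
	if err != nil {
		panic(err)
	}

	// *Cloud satisfies cloudprovider.Interface (see the var _ assertions above).
	if lb, supported := provider.LoadBalancer(); supported {
		_ = lb // load balancer operations would be driven from here
	}
}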


@@ -1,597 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"net/http"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
// requestBackoff returns the configured backoff or, if backoff is disabled in
// the cloud provider, a new Backoff object with Steps = 1.
// This makes sure that the requested command executes at least once.
func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) {
if az.CloudProviderBackoff {
return az.resourceRequestBackoff
}
resourceRequestBackoff = wait.Backoff{
Steps: 1,
}
return resourceRequestBackoff
}
// Event creates an event for the specified object.
func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) {
if obj != nil && reason != "" {
az.eventRecorder.Event(obj, eventtype, reason, message)
}
}
// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) {
var machine compute.VirtualMachine
var retryErr error
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
machine, retryErr = az.getVirtualMachine(name)
if retryErr == cloudprovider.InstanceNotFound {
return true, cloudprovider.InstanceNotFound
}
if retryErr != nil {
klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
return false, nil
}
klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
return true, nil
})
if err == wait.ErrWaitTimeout {
err = retryErr
}
return machine, err
}
// ListVirtualMachinesWithRetry invokes az.VirtualMachinesClient.List with exponential backoff retry
func (az *Cloud) ListVirtualMachinesWithRetry(resourceGroup string) ([]compute.VirtualMachine, error) {
allNodes := []compute.VirtualMachine{}
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
ctx, cancel := getContextWithCancel()
defer cancel()
allNodes, retryErr = az.VirtualMachinesClient.List(ctx, resourceGroup)
if retryErr != nil {
klog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
resourceGroup,
retryErr)
return false, retryErr
}
klog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", resourceGroup)
return true, nil
})
if err != nil {
return nil, err
}
return allNodes, err
}
// ListVirtualMachines invokes az.VirtualMachinesClient.List with exponential backoff retry
func (az *Cloud) ListVirtualMachines(resourceGroup string) ([]compute.VirtualMachine, error) {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
allNodes, err := az.VirtualMachinesClient.List(ctx, resourceGroup)
if err != nil {
klog.Errorf("VirtualMachinesClient.List(%v) failure with err=%v", resourceGroup, err)
return nil, err
}
klog.V(2).Infof("VirtualMachinesClient.List(%v) success", resourceGroup)
return allNodes, nil
}
return az.ListVirtualMachinesWithRetry(resourceGroup)
}
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) {
if az.Config.shouldOmitCloudProviderBackoff() {
return az.vmSet.GetIPByNodeName(string(nodeName))
}
return az.GetIPForMachineWithRetry(nodeName)
}
// GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry
func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, error) {
var ip, publicIP string
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
ip, publicIP, retryErr = az.vmSet.GetIPByNodeName(string(name))
if retryErr != nil {
klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
return false, nil
}
klog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
return true, nil
})
return ip, publicIP, err
}
// CreateOrUpdateSecurityGroup invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateSecurityGroup(service *v1.Service, sg network.SecurityGroup) error {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg)
klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
if err == nil {
if isSuccessHTTPResponse(resp) {
// Invalidate the cache right after updating
az.nsgCache.Delete(*sg.Name)
} else if resp != nil {
return fmt.Errorf("HTTP response %q", resp.Status)
}
}
return err
}
return az.CreateOrUpdateSGWithRetry(service, sg)
}
// CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateSGWithRetry(service *v1.Service, sg network.SecurityGroup) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg)
klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateSecurityGroup", resp, err)
if done && err == nil {
// Invalidate the cache right after updating
az.nsgCache.Delete(*sg.Name)
}
return done, err
})
}
// CreateOrUpdateLB invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb)
klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
if err == nil {
if isSuccessHTTPResponse(resp) {
// Invalidate the cache right after updating
az.lbCache.Delete(*lb.Name)
} else if resp != nil {
return fmt.Errorf("HTTP response %q", resp.Status)
}
}
return err
}
return az.createOrUpdateLBWithRetry(service, lb)
}
// createOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) createOrUpdateLBWithRetry(service *v1.Service, lb network.LoadBalancer) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb)
klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateLoadBalancer", resp, err)
if done && err == nil {
// Invalidate the cache right after updating
az.lbCache.Delete(*lb.Name)
}
return done, err
})
}
// ListLB invokes az.LoadBalancerClient.List with exponential backoff retry
func (az *Cloud) ListLB(service *v1.Service) ([]network.LoadBalancer, error) {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
allLBs, err := az.LoadBalancerClient.List(ctx, az.ResourceGroup)
if err != nil {
az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", err.Error())
klog.Errorf("LoadBalancerClient.List(%v) failure with err=%v", az.ResourceGroup, err)
return nil, err
}
klog.V(2).Infof("LoadBalancerClient.List(%v) success", az.ResourceGroup)
return allLBs, nil
}
return az.listLBWithRetry(service)
}
// listLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry
func (az *Cloud) listLBWithRetry(service *v1.Service) ([]network.LoadBalancer, error) {
var allLBs []network.LoadBalancer
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
ctx, cancel := getContextWithCancel()
defer cancel()
allLBs, retryErr = az.LoadBalancerClient.List(ctx, az.ResourceGroup)
if retryErr != nil {
az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", retryErr.Error())
klog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup,
retryErr)
return false, retryErr
}
klog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup)
return true, nil
})
if err != nil {
return nil, err
}
return allLBs, nil
}
// ListPIP lists the PIP resources in the given resource group
func (az *Cloud) ListPIP(service *v1.Service, pipResourceGroup string) ([]network.PublicIPAddress, error) {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
allPIPs, err := az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
if err != nil {
az.Event(service, v1.EventTypeWarning, "ListPublicIPs", err.Error())
klog.Errorf("PublicIPAddressesClient.List(%v) failure with err=%v", pipResourceGroup, err)
return nil, err
}
klog.V(2).Infof("PublicIPAddressesClient.List(%v) success", pipResourceGroup)
return allPIPs, nil
}
return az.listPIPWithRetry(service, pipResourceGroup)
}
// listPIPWithRetry lists the PIP resources in the given resource group
func (az *Cloud) listPIPWithRetry(service *v1.Service, pipResourceGroup string) ([]network.PublicIPAddress, error) {
var allPIPs []network.PublicIPAddress
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
ctx, cancel := getContextWithCancel()
defer cancel()
allPIPs, retryErr = az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
if retryErr != nil {
az.Event(service, v1.EventTypeWarning, "ListPublicIPs", retryErr.Error())
klog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
pipResourceGroup,
retryErr)
return false, retryErr
}
klog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup)
return true, nil
})
if err != nil {
return nil, err
}
return allPIPs, nil
}
// CreateOrUpdatePIP invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip)
klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
return az.processHTTPResponse(service, "CreateOrUpdatePublicIPAddress", resp, err)
}
return az.createOrUpdatePIPWithRetry(service, pipResourceGroup, pip)
}
// createOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) createOrUpdatePIPWithRetry(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip)
klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
return az.processHTTPRetryResponse(service, "CreateOrUpdatePublicIPAddress", resp, err)
})
}
// CreateOrUpdateInterface invokes az.InterfacesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateInterface(service *v1.Service, nic network.Interface) error {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
return az.processHTTPResponse(service, "CreateOrUpdateInterface", resp, err)
}
return az.createOrUpdateInterfaceWithRetry(service, nic)
}
// createOrUpdateInterfaceWithRetry invokes az.InterfacesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) createOrUpdateInterfaceWithRetry(service *v1.Service, nic network.Interface) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
return az.processHTTPRetryResponse(service, "CreateOrUpdateInterface", resp, err)
})
}
// DeletePublicIP invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
func (az *Cloud) DeletePublicIP(service *v1.Service, pipResourceGroup string, pipName string) error {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
return az.processHTTPResponse(service, "DeletePublicIPAddress", resp, err)
}
return az.deletePublicIPWithRetry(service, pipResourceGroup, pipName)
}
// deletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
func (az *Cloud) deletePublicIPWithRetry(service *v1.Service, pipResourceGroup string, pipName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
return az.processHTTPRetryResponse(service, "DeletePublicIPAddress", resp, err)
})
}
// DeleteLB invokes az.LoadBalancerClient.Delete with exponential backoff retry
func (az *Cloud) DeleteLB(service *v1.Service, lbName string) error {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.LoadBalancerClient.Delete(ctx, az.ResourceGroup, lbName)
if err == nil {
if isSuccessHTTPResponse(resp) {
// Invalidate the cache right after updating
az.lbCache.Delete(lbName)
} else if resp != nil {
return fmt.Errorf("HTTP response %q", resp.Status)
}
}
return err
}
return az.deleteLBWithRetry(service, lbName)
}
// deleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry
func (az *Cloud) deleteLBWithRetry(service *v1.Service, lbName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.LoadBalancerClient.Delete(ctx, az.ResourceGroup, lbName)
done, err := az.processHTTPRetryResponse(service, "DeleteLoadBalancer", resp, err)
if done && err == nil {
// Invalidate the cache right after deleting
az.lbCache.Delete(lbName)
}
return done, err
})
}
// CreateOrUpdateRouteTable invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateRouteTable(routeTable network.RouteTable) error {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable)
return az.processHTTPResponse(nil, "", resp, err)
}
return az.createOrUpdateRouteTableWithRetry(routeTable)
}
// createOrUpdateRouteTableWithRetry invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) createOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}
// CreateOrUpdateRoute invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateRoute(route network.Route) error {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route)
klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
return az.processHTTPResponse(nil, "", resp, err)
}
return az.createOrUpdateRouteWithRetry(route)
}
// createOrUpdateRouteWithRetry invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) createOrUpdateRouteWithRetry(route network.Route) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route)
klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}
// DeleteRouteWithName invokes az.RoutesClient.Delete with exponential backoff retry
func (az *Cloud) DeleteRouteWithName(routeName string) error {
if az.Config.shouldOmitCloudProviderBackoff() {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.RoutesClient.Delete(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeName)
klog.V(10).Infof("RoutesClient.Delete(%s,%s): end", az.RouteTableName, routeName)
return az.processHTTPResponse(nil, "", resp, err)
}
return az.deleteRouteWithRetry(routeName)
}
// deleteRouteWithRetry invokes az.RoutesClient.Delete with exponential backoff retry
func (az *Cloud) deleteRouteWithRetry(routeName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.RoutesClient.Delete(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeName)
klog.V(10).Infof("RoutesClient.Delete(%s,%s): end", az.RouteTableName, routeName)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}
// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM compute.VirtualMachine) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM)
klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}
// UpdateVmssVMWithRetry invokes az.VirtualMachineScaleSetVMsClient.Update with exponential backoff retry
func (az *Cloud) UpdateVmssVMWithRetry(resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}
// isSuccessHTTPResponse determines if the response from an HTTP request suggests success
func isSuccessHTTPResponse(resp *http.Response) bool {
if resp == nil {
return false
}
// HTTP 2xx suggests a successful response
if 199 < resp.StatusCode && resp.StatusCode < 300 {
return true
}
return false
}
func shouldRetryHTTPRequest(resp *http.Response, err error) bool {
if err != nil {
return true
}
if resp != nil {
// HTTP 4xx or 5xx suggests we should retry
if 399 < resp.StatusCode && resp.StatusCode < 600 {
return true
}
}
return false
}
func (az *Cloud) processHTTPRetryResponse(service *v1.Service, reason string, resp *http.Response, err error) (bool, error) {
if resp != nil && isSuccessHTTPResponse(resp) {
// HTTP 2xx suggests a successful response
return true, nil
}
if shouldRetryHTTPRequest(resp, err) {
if err != nil {
az.Event(service, v1.EventTypeWarning, reason, err.Error())
klog.Errorf("processHTTPRetryResponse: backoff failure, will retry, err=%v", err)
} else {
az.Event(service, v1.EventTypeWarning, reason, fmt.Sprintf("Azure HTTP response %d", resp.StatusCode))
klog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d", resp.StatusCode)
}
// suppress the error object so that backoff process continues
return false, nil
}
// Fall-through: stop periodic backoff
return true, nil
}
func (az *Cloud) processHTTPResponse(service *v1.Service, reason string, resp *http.Response, err error) error {
if isSuccessHTTPResponse(resp) {
// HTTP 2xx suggests a successful response
return nil
}
if err != nil {
az.Event(service, v1.EventTypeWarning, reason, err.Error())
klog.Errorf("processHTTPRetryResponse failure with err: %v", err)
} else if resp != nil {
az.Event(service, v1.EventTypeWarning, reason, fmt.Sprintf("Azure HTTP response %d", resp.StatusCode))
klog.Errorf("processHTTPRetryResponse failure with HTTP response %q", resp.Status)
}
return err
}
func (cfg *Config) shouldOmitCloudProviderBackoff() bool {
return cfg.CloudProviderBackoffMode == backoffModeV2
}
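// Illustrative sketch, not part of the original file: every *WithRetry helper
// above follows the same contract, where wait.ExponentialBackoff keeps calling
// a closure until it returns (true, err) to stop or (false, nil) to retry. The
// hypothetical function below restates that contract with the same status-code
// rules as isSuccessHTTPResponse and shouldRetryHTTPRequest, assuming only the
// net/http and wait packages this file already imports.
func retryRequestSketch(backoff wait.Backoff, op func() (*http.Response, error)) error {
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		resp, err := op()
		if resp != nil && 199 < resp.StatusCode && resp.StatusCode < 300 {
			return true, nil // 2xx: stop retrying, report success
		}
		if err != nil || (resp != nil && 399 < resp.StatusCode && resp.StatusCode < 600) {
			return false, nil // transport error or 4xx/5xx: suppress and retry
		}
		return true, nil // anything else (e.g. 3xx): stop without an error
	})
}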

View File

@@ -1,140 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"net/http"
"testing"
)
func TestShouldRetryHTTPRequest(t *testing.T) {
tests := []struct {
code int
err error
expected bool
}{
{
code: http.StatusBadRequest,
expected: true,
},
{
code: http.StatusInternalServerError,
expected: true,
},
{
code: http.StatusOK,
err: fmt.Errorf("some error"),
expected: true,
},
{
code: http.StatusOK,
expected: false,
},
{
code: 399,
expected: false,
},
}
for _, test := range tests {
resp := &http.Response{
StatusCode: test.code,
}
res := shouldRetryHTTPRequest(resp, test.err)
if res != test.expected {
t.Errorf("expected: %v, saw: %v", test.expected, res)
}
}
}
func TestIsSuccessResponse(t *testing.T) {
tests := []struct {
code int
expected bool
}{
{
code: http.StatusNotFound,
expected: false,
},
{
code: http.StatusInternalServerError,
expected: false,
},
{
code: http.StatusOK,
expected: true,
},
}
for _, test := range tests {
resp := http.Response{
StatusCode: test.code,
}
res := isSuccessHTTPResponse(&resp)
if res != test.expected {
t.Errorf("expected: %v, saw: %v", test.expected, res)
}
}
}
func TestProcessRetryResponse(t *testing.T) {
az := &Cloud{}
tests := []struct {
code int
err error
stop bool
}{
{
code: http.StatusBadRequest,
stop: false,
},
{
code: http.StatusInternalServerError,
stop: false,
},
{
code: http.StatusSeeOther,
err: fmt.Errorf("some error"),
stop: false,
},
{
code: http.StatusSeeOther,
stop: true,
},
{
code: http.StatusOK,
stop: true,
},
{
code: 399,
stop: true,
},
}
for _, test := range tests {
resp := &http.Response{
StatusCode: test.code,
}
res, err := az.processHTTPRetryResponse(nil, "", resp, test.err)
if res != test.stop {
t.Errorf("expected: %v, saw: %v", test.stop, res)
}
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}
}

View File

@@ -1,640 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"net/url"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
azstorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest/to"
"github.com/rubiojr/go-vhd/vhd"
kwait "k8s.io/apimachinery/pkg/util/wait"
volerr "k8s.io/cloud-provider/volume/errors"
"k8s.io/klog"
)
const (
vhdContainerName = "vhds"
useHTTPSForBlobBasedDisk = true
blobServiceName = "blob"
)
type storageAccountState struct {
name string
saType storage.SkuName
key string
diskCount int32
isValidating int32
defaultContainerCreated bool
}
// BlobDiskController is the blob (VHD) disk controller
type BlobDiskController struct {
common *controllerCommon
accounts map[string]*storageAccountState
}
var (
accountsLock = &sync.Mutex{}
)
func (c *BlobDiskController) initStorageAccounts() {
accountsLock.Lock()
defer accountsLock.Unlock()
if c.accounts == nil {
// get accounts
accounts, err := c.getAllStorageAccounts()
if err != nil {
klog.Errorf("azureDisk - getAllStorageAccounts error: %v", err)
c.accounts = make(map[string]*storageAccountState)
return
}
c.accounts = accounts
}
}
// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
// fits storage type and location.
func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) {
account, key, err := c.common.cloud.EnsureStorageAccount(accountName, accountType, string(defaultStorageAccountKind), c.common.resourceGroup, location, dedicatedDiskAccountNamePrefix)
if err != nil {
return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
client, err := azstorage.NewBasicClientOnSovereignCloud(account, key, c.common.cloud.Environment)
if err != nil {
return "", "", 0, err
}
blobClient := client.GetBlobService()
// create a page blob in this account's vhd container
diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account, blobName, vhdContainerName, int64(requestGB))
if err != nil {
return "", "", 0, err
}
klog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
return diskName, diskURI, requestGB, err
}
// DeleteVolume deletes a VHD blob
func (c *BlobDiskController) DeleteVolume(diskURI string) error {
klog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI)
accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI)
if err != nil {
return fmt.Errorf("failed to parse vhd URI %v", err)
}
key, err := c.common.cloud.GetStorageAccesskey(accountName, c.common.resourceGroup)
if err != nil {
return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
}
err = c.common.cloud.deleteVhdBlob(accountName, key, blob)
if err != nil {
klog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err)
detail := err.Error()
if strings.Contains(detail, errLeaseIDMissing) {
// disk is still being used
// see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx
return volerr.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", diskURI))
}
return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err)
}
klog.V(4).Infof("azureDisk - blob %s deleted", diskURI)
return nil
}
// get diskURI https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name)
func (c *BlobDiskController) getBlobNameAndAccountFromURI(diskURI string) (string, string, error) {
scheme := "http"
if useHTTPSForBlobBasedDisk {
scheme = "https"
}
host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, c.common.storageEndpointSuffix)
reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName)
re := regexp.MustCompile(reStr)
res := re.FindSubmatch([]byte(diskURI))
if len(res) < 3 {
return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, diskURI)
}
return string(res[1]), string(res[2]), nil
}
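// Illustrative sketch, not part of the original file: for a disk URI such as
// https://foo.blob.core.windows.net/vhds/bar.vhd (with storageEndpointSuffix
// "core.windows.net") the regex above yields account "foo" and blob "bar.vhd".
// A hypothetical stdlib-only equivalent, assuming the same URI shape:
func splitVhdURISketch(diskURI string) (account, blob string, err error) {
	u, err := url.Parse(diskURI)
	if err != nil {
		return "", "", err
	}
	account = strings.Split(u.Host, ".")[0]          // "foo"
	blob = u.Path[strings.LastIndex(u.Path, "/")+1:] // "bar.vhd"
	return account, blob, nil
}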
func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageClient, accountName, vhdName, containerName string, sizeGB int64) (string, string, error) {
container := blobClient.GetContainerReference(containerName)
size := 1024 * 1024 * 1024 * sizeGB
vhdSize := size + vhd.VHD_HEADER_SIZE /* header size */
// Blob name in URL must end with '.vhd' extension.
vhdName = vhdName + ".vhd"
tags := make(map[string]string)
tags["createdby"] = "k8sAzureDataDisk"
klog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName)
blob := container.GetBlobReference(vhdName)
blob.Properties.ContentLength = vhdSize
blob.Metadata = tags
err := blob.PutPageBlob(nil)
if err != nil {
// if container doesn't exist, create one and retry PutPageBlob
detail := err.Error()
if strings.Contains(detail, errContainerNotFound) {
err = container.Create(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
if err == nil {
err = blob.PutPageBlob(nil)
}
}
}
if err != nil {
return "", "", fmt.Errorf("failed to put page blob %s in container %s: %v", vhdName, containerName, err)
}
// add VHD signature to the blob
h, err := createVHDHeader(uint64(size))
if err != nil {
blob.DeleteIfExists(nil)
return "", "", fmt.Errorf("failed to create vhd header, err: %v", err)
}
blobRange := azstorage.BlobRange{
Start: uint64(size),
End: uint64(vhdSize - 1),
}
if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil {
klog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n",
vhdName, containerName, accountName, err.Error())
return "", "", err
}
scheme := "http"
if useHTTPSForBlobBasedDisk {
scheme = "https"
}
host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, c.common.storageEndpointSuffix)
uri := fmt.Sprintf("%s/%s/%s", host, containerName, vhdName)
return vhdName, uri, nil
}
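// Illustrative sketch, not part of the original file: the page blob above is
// sized as the requested capacity in bytes plus the fixed VHD header, and that
// header is written as the trailing footer range [size, size+headerLen-1].
// The hypothetical helper below only restates this arithmetic.
func vhdBlobLayoutSketch(sizeGB int64) (blobLen, footerStart, footerEnd int64) {
	size := sizeGB * 1024 * 1024 * 1024
	return size + vhd.VHD_HEADER_SIZE, size, size + vhd.VHD_HEADER_SIZE - 1
}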
// delete a vhd blob
func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName string) error {
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, c.common.cloud.Environment)
if err != nil {
return err
}
blobSvc := client.GetBlobService()
container := blobSvc.GetContainerReference(vhdContainerName)
blob := container.GetBlobReference(blobName)
return blob.Delete(nil)
}
// CreateBlobDisk creates a blob (VHD) disk in a shared storage account
func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) {
klog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType)
c.initStorageAccounts()
storageAccountName, err := c.findSANameForDisk(storageAccountType)
if err != nil {
return "", err
}
blobClient, err := c.getBlobSvcClient(storageAccountName)
if err != nil {
return "", err
}
_, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, vhdContainerName, int64(sizeGB))
if err != nil {
return "", err
}
atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1)
return diskURI, nil
}
// DeleteBlobDisk deletes a blob (VHD) disk
func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI)
if err != nil {
return err
}
_, ok := c.accounts[storageAccountName]
if !ok {
// the storage account is specified by user
klog.V(4).Infof("azureDisk - deleting volume %s", diskURI)
return c.DeleteVolume(diskURI)
}
blobSvc, err := c.getBlobSvcClient(storageAccountName)
if err != nil {
return err
}
klog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName)
container := blobSvc.GetContainerReference(vhdContainerName)
blob := container.GetBlobReference(vhdName)
_, err = blob.DeleteIfExists(nil)
if c.accounts[storageAccountName].diskCount == -1 {
if diskCount, err := c.getDiskCount(storageAccountName); err == nil {
c.accounts[storageAccountName].diskCount = int32(diskCount)
} else {
klog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
return nil // we have failed to acquire a new count. not an error condition
}
}
atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1)
return err
}
func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) {
if account, exists := c.accounts[SAName]; exists && account.key != "" {
return c.accounts[SAName].key, nil
}
ctx, cancel := getContextWithCancel()
defer cancel()
listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(ctx, c.common.resourceGroup, SAName)
if err != nil {
return "", err
}
if listKeysResult.Keys == nil {
return "", fmt.Errorf("azureDisk - empty listKeysResult in storage account:%s keys", SAName)
}
for _, v := range *listKeysResult.Keys {
if v.Value != nil && *v.Value == "key1" {
if _, ok := c.accounts[SAName]; !ok {
klog.Warningf("azureDisk - account %s was not cached while getting keys", SAName)
return *v.Value, nil
}
}
c.accounts[SAName].key = *v.Value
return c.accounts[SAName].key, nil
}
return "", fmt.Errorf("couldn't find key named key1 in storage account:%s keys", SAName)
}
func (c *BlobDiskController) getBlobSvcClient(SAName string) (azstorage.BlobStorageClient, error) {
key := ""
var client azstorage.Client
var blobSvc azstorage.BlobStorageClient
var err error
if key, err = c.getStorageAccountKey(SAName); err != nil {
return blobSvc, err
}
if client, err = azstorage.NewBasicClientOnSovereignCloud(SAName, key, c.common.cloud.Environment); err != nil {
return blobSvc, err
}
blobSvc = client.GetBlobService()
return blobSvc, nil
}
func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) error {
var err error
var blobSvc azstorage.BlobStorageClient
// short circuit the check via local cache
// we are forgiving the fact that account may not be in cache yet
if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
return nil
}
// not cached, check existence and readiness
bExist, provisionState, _ := c.getStorageAccountState(storageAccountName)
// account does not exist
if !bExist {
return fmt.Errorf("azureDisk - account %s does not exist while trying to create/ensure default container", storageAccountName)
}
// account exists but not ready yet
if provisionState != storage.Succeeded {
// we don't want many attempts to validate the account readiness
// here hence we are locking
counter := 1
for !atomic.CompareAndSwapInt32(&c.accounts[storageAccountName].isValidating, 0, 1) {
time.Sleep(3 * time.Second)
counter = counter + 1
// check if we passed the max sleep
if counter >= 20 {
return fmt.Errorf("azureDisk - timeout waiting to acquire lock to validate account:%s readiness", storageAccountName)
}
}
// swapped
defer func() {
atomic.StoreInt32(&c.accounts[storageAccountName].isValidating, 0)
}()
// short circuit the check again.
if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
return nil
}
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
_, provisionState, err := c.getStorageAccountState(storageAccountName)
if err != nil {
klog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error())
return false, nil // error performing the query - retryable
}
if provisionState == storage.Succeeded {
return true, nil
}
klog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName)
return false, nil // back off and see if the account becomes ready on next retry
})
// we have failed to ensure that account is ready for us to create
// the default vhd container
if err != nil {
if err == kwait.ErrWaitTimeout {
return fmt.Errorf("azureDisk - timed out waiting for storage account %s to become ready", storageAccountName)
}
return err
}
}
if blobSvc, err = c.getBlobSvcClient(storageAccountName); err != nil {
return err
}
container := blobSvc.GetContainerReference(vhdContainerName)
bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
if err != nil {
return err
}
if bCreated {
klog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName)
}
// flag so we no longer have to check on ARM
c.accounts[storageAccountName].defaultContainerCreated = true
return nil
}
// Gets Disk counts per storage account
func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
// if we have it in cache
if c.accounts[SAName].diskCount != -1 {
return int(c.accounts[SAName].diskCount), nil
}
var err error
var blobSvc azstorage.BlobStorageClient
if err = c.ensureDefaultContainer(SAName); err != nil {
return 0, err
}
if blobSvc, err = c.getBlobSvcClient(SAName); err != nil {
return 0, err
}
params := azstorage.ListBlobsParameters{}
container := blobSvc.GetContainerReference(vhdContainerName)
response, err := container.ListBlobs(params)
if err != nil {
return 0, err
}
klog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs))
c.accounts[SAName].diskCount = int32(len(response.Blobs))
return int(c.accounts[SAName].diskCount), nil
}
func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) {
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
accountListResult, err := c.common.cloud.StorageAccountClient.ListByResourceGroup(ctx, c.common.resourceGroup)
if err != nil {
return nil, err
}
if accountListResult.Value == nil {
return nil, fmt.Errorf("azureDisk - empty accountListResult")
}
accounts := make(map[string]*storageAccountState)
for _, v := range *accountListResult.Value {
if v.Name == nil || v.Sku == nil {
klog.Info("azureDisk - accountListResult Name or Sku is nil")
continue
}
if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) {
continue
}
klog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
sastate := &storageAccountState{
name: *v.Name,
saType: (*v.Sku).Name,
diskCount: -1,
}
accounts[*v.Name] = sastate
}
return accounts, nil
}
func (c *BlobDiskController) createStorageAccount(storageAccountName string, storageAccountType storage.SkuName, location string, checkMaxAccounts bool) error {
bExist, _, _ := c.getStorageAccountState(storageAccountName)
if bExist {
newAccountState := &storageAccountState{
diskCount: -1,
saType: storageAccountType,
name: storageAccountName,
}
c.addAccountState(storageAccountName, newAccountState)
}
// Account Does not exist
if !bExist {
if len(c.accounts) == maxStorageAccounts && checkMaxAccounts {
return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
}
klog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))
cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storageAccountType},
// switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
Kind: defaultStorageAccountKind,
Tags: map[string]*string{"created-by": to.StringPtr("azure-dd")},
Location: &location}
ctx, cancel := getContextWithCancel()
defer cancel()
_, err := c.common.cloud.StorageAccountClient.Create(ctx, c.common.resourceGroup, storageAccountName, cp)
if err != nil {
return fmt.Errorf(fmt.Sprintf("Create Storage Account: %s, error: %s", storageAccountName, err))
}
newAccountState := &storageAccountState{
diskCount: -1,
saType: storageAccountType,
name: storageAccountName,
}
c.addAccountState(storageAccountName, newAccountState)
}
// finally, make sure that the default container is created
// before handing it back over
return c.ensureDefaultContainer(storageAccountName)
}
// finds a new suitable storageAccount for this disk
func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuName) (string, error) {
maxDiskCount := maxDisksPerStorageAccounts
SAName := ""
totalDiskCounts := 0
countAccounts := 0 // count of accounts of this type.
for _, v := range c.accounts {
// filter out any stand-alone disks/accounts
if !strings.HasPrefix(v.name, sharedDiskAccountNamePrefix) {
continue
}
// note: we compute avg stratified by type.
// this is to enable user to grow per SA type to avoid low
// avg utilization on one account type skewing all data.
if v.saType == storageAccountType {
// compute average
dCount, err := c.getDiskCount(v.name)
if err != nil {
return "", err
}
totalDiskCounts = totalDiskCounts + dCount
countAccounts = countAccounts + 1
// empty account
if dCount == 0 {
klog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name)
return v.name, nil // short circuit, avg is good and no need to adjust
}
// if this account is less allocated
if dCount < maxDiskCount {
maxDiskCount = dCount
SAName = v.name
}
}
}
// if we failed to find storageaccount
if SAName == "" {
klog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil {
return "", err
}
return SAName, nil
}
disksAfter := totalDiskCounts + 1 // with the new one!
avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing)
// average utilization is above the threshold and we should create more accounts if we can
if aboveAvg && countAccounts < maxStorageAccounts {
klog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil {
return "", err
}
return SAName, nil
}
// average utilization is above the threshold but we are already at capacity (max storage accounts allowed)
if aboveAvg && countAccounts == maxStorageAccounts {
klog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
}
// we found a storage account && [ average utilization is ok || we reached the max SA count ]
return SAName, nil
}
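// Illustrative sketch, not part of the original file: the growth heuristic
// above adds a shared account once the projected average utilization, that is
// (existing disks + 1) / (accounts * maxDisksPerStorageAccounts), exceeds
// storageAccountUtilizationBeforeGrowing, while fewer than maxStorageAccounts
// shared accounts exist. A hypothetical restatement (assumes accounts > 0):
func shouldGrowAccountsSketch(totalDisks, accounts int) bool {
	projected := float64(totalDisks+1) / float64(accounts*maxDisksPerStorageAccounts)
	return projected > storageAccountUtilizationBeforeGrowing && accounts < maxStorageAccounts
}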
// getStorageAccountState returns whether the storage account exists, its provisioning state, and an error if any
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
account, err := c.common.cloud.StorageAccountClient.GetProperties(ctx, c.common.resourceGroup, storageAccountName)
if err != nil {
return false, "", err
}
return true, account.AccountProperties.ProvisioningState, nil
}
func (c *BlobDiskController) addAccountState(key string, state *storageAccountState) {
accountsLock.Lock()
defer accountsLock.Unlock()
if _, ok := c.accounts[key]; !ok {
c.accounts[key] = state
}
}
func createVHDHeader(size uint64) ([]byte, error) {
h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
b := new(bytes.Buffer)
err := binary.Write(b, binary.BigEndian, h)
if err != nil {
return nil, err
}
return b.Bytes(), nil
}
func diskNameandSANameFromURI(diskURI string) (string, string, error) {
uri, err := url.Parse(diskURI)
if err != nil {
return "", "", err
}
hostName := uri.Host
storageAccountName := strings.Split(hostName, ".")[0]
segments := strings.Split(uri.Path, "/")
diskNameVhd := segments[len(segments)-1]
return storageAccountName, diskNameVhd, nil
}

View File

@@ -1,124 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"sync"
"time"
"k8s.io/client-go/tools/cache"
)
// getFunc defines a getter function for timedCache.
type getFunc func(key string) (interface{}, error)
// cacheEntry is the internal structure stores inside TTLStore.
type cacheEntry struct {
key string
data interface{}
// The lock to ensure not updating same entry simultaneously.
lock sync.Mutex
}
// cacheKeyFunc defines the key function required in TTLStore.
func cacheKeyFunc(obj interface{}) (string, error) {
return obj.(*cacheEntry).key, nil
}
// timedCache is a cache with TTL.
type timedCache struct {
store cache.Store
lock sync.Mutex
getter getFunc
}
// newTimedcache creates a new timedCache.
func newTimedcache(ttl time.Duration, getter getFunc) (*timedCache, error) {
if getter == nil {
return nil, fmt.Errorf("getter is not provided")
}
return &timedCache{
getter: getter,
store: cache.NewTTLStore(cacheKeyFunc, ttl),
}, nil
}
// getInternal returns cacheEntry by key. If the key is not cached yet,
// it returns a cacheEntry with nil data.
func (t *timedCache) getInternal(key string) (*cacheEntry, error) {
entry, exists, err := t.store.GetByKey(key)
if err != nil {
return nil, err
}
if exists {
return entry.(*cacheEntry), nil
}
t.lock.Lock()
defer t.lock.Unlock()
entry, exists, err = t.store.GetByKey(key)
if err != nil {
return nil, err
}
if exists {
return entry.(*cacheEntry), nil
}
// Still not found, add new entry with nil data.
// Note the data will be filled later by getter.
newEntry := &cacheEntry{
key: key,
data: nil,
}
t.store.Add(newEntry)
return newEntry, nil
}
// Get returns the requested item by key.
func (t *timedCache) Get(key string) (interface{}, error) {
entry, err := t.getInternal(key)
if err != nil {
return nil, err
}
// Data is still not cached yet, cache it by getter.
if entry.data == nil {
entry.lock.Lock()
defer entry.lock.Unlock()
if entry.data == nil {
data, err := t.getter(key)
if err != nil {
return nil, err
}
entry.data = data
}
}
return entry.data, nil
}
// Delete removes an item from the cache.
func (t *timedCache) Delete(key string) error {
return t.store.Delete(&cacheEntry{
key: key,
})
}
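// Illustrative sketch, not part of the original file: a typical consumer wires
// a getter that fetches from the backing API and then reads through the cache.
// The names below are hypothetical; only the stdlib time package is assumed.
func newExampleCacheSketch(ttl time.Duration) (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		// Look up `key` against the backing API here. Returning nil data means
		// the next Get will call the getter again instead of caching a miss.
		return nil, nil
	}
	return newTimedcache(ttl, getter)
}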

View File

@@ -1,160 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
var (
fakeCacheTTL = 2 * time.Second
)
type fakeDataObj struct{}
type fakeDataSource struct {
called int
data map[string]*fakeDataObj
lock sync.Mutex
}
func (fake *fakeDataSource) get(key string) (interface{}, error) {
fake.lock.Lock()
defer fake.lock.Unlock()
fake.called = fake.called + 1
if v, ok := fake.data[key]; ok {
return v, nil
}
return nil, nil
}
func (fake *fakeDataSource) set(data map[string]*fakeDataObj) {
fake.lock.Lock()
defer fake.lock.Unlock()
fake.data = data
fake.called = 0
}
func newFakeCache(t *testing.T) (*fakeDataSource, *timedCache) {
dataSource := &fakeDataSource{
data: make(map[string]*fakeDataObj),
}
getter := dataSource.get
cache, err := newTimedcache(fakeCacheTTL, getter)
assert.NoError(t, err)
return dataSource, cache
}
func TestCacheGet(t *testing.T) {
val := &fakeDataObj{}
cases := []struct {
name string
data map[string]*fakeDataObj
key string
expected interface{}
}{
{
name: "cache should return nil for empty data source",
key: "key1",
expected: nil,
},
{
name: "cache should return nil for non-existent key",
data: map[string]*fakeDataObj{"key2": val},
key: "key1",
expected: nil,
},
{
name: "cache should return data for existing key",
data: map[string]*fakeDataObj{"key1": val},
key: "key1",
expected: val,
},
}
for _, c := range cases {
dataSource, cache := newFakeCache(t)
dataSource.set(c.data)
val, err := cache.Get(c.key)
assert.NoError(t, err, c.name)
assert.Equal(t, c.expected, val, c.name)
}
}
func TestCacheGetError(t *testing.T) {
getError := fmt.Errorf("getError")
getter := func(key string) (interface{}, error) {
return nil, getError
}
cache, err := newTimedcache(fakeCacheTTL, getter)
assert.NoError(t, err)
val, err := cache.Get("key")
assert.Error(t, err)
assert.Equal(t, getError, err)
assert.Nil(t, val)
}
func TestCacheDelete(t *testing.T) {
key := "key1"
val := &fakeDataObj{}
data := map[string]*fakeDataObj{
key: val,
}
dataSource, cache := newFakeCache(t)
dataSource.set(data)
v, err := cache.Get(key)
assert.NoError(t, err)
assert.Equal(t, val, v, "cache should get correct data")
dataSource.set(nil)
cache.Delete(key)
v, err = cache.Get(key)
assert.NoError(t, err)
assert.Equal(t, 1, dataSource.called)
assert.Equal(t, nil, v, "cache should get nil after data is removed")
}
func TestCacheExpired(t *testing.T) {
key := "key1"
val := &fakeDataObj{}
data := map[string]*fakeDataObj{
key: val,
}
dataSource, cache := newFakeCache(t)
dataSource.set(data)
v, err := cache.Get(key)
assert.NoError(t, err)
assert.Equal(t, 1, dataSource.called)
assert.Equal(t, val, v, "cache should get correct data")
time.Sleep(fakeCacheTTL)
v, err = cache.Get(key)
assert.NoError(t, err)
assert.Equal(t, 2, dataSource.called)
assert.Equal(t, val, v, "cache should get correct data even after expired")
}

File diff suppressed because it is too large

View File

@@ -1,241 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"fmt"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/types"
kwait "k8s.io/apimachinery/pkg/util/wait"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/utils/keymutex"
)
const (
storageAccountNameTemplate = "pvc%s"
// for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
maxStorageAccounts = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
maxDisksPerStorageAccounts = 60
storageAccountUtilizationBeforeGrowing = 0.5
maxLUN = 64 // max number of LUNs per VM
errLeaseFailed = "AcquireDiskLeaseFailed"
errLeaseIDMissing = "LeaseIdMissing"
errContainerNotFound = "ContainerNotFound"
errDiskBlobNotFound = "DiskBlobNotFound"
)
var defaultBackOff = kwait.Backoff{
Steps: 20,
Duration: 2 * time.Second,
Factor: 1.5,
Jitter: 0.0,
}
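// Illustrative sketch, not part of the original file: with Jitter 0 the sleep
// between attempts grows geometrically (2s, 3s, 4.5s, ...), so the worst-case
// total wait is a geometric series over Steps. The hypothetical helper below
// sums it for any kwait.Backoff that has no cap configured.
func totalBackoffSketch(b kwait.Backoff) time.Duration {
	total, d := time.Duration(0), b.Duration
	for i := 0; i < b.Steps; i++ {
		total += d
		d = time.Duration(float64(d) * b.Factor)
	}
	return total // on the order of a few hours for defaultBackOff above
}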
// acquire lock to attach/detach disk in one node
var diskOpMutex = keymutex.NewHashed(0)
type controllerCommon struct {
subscriptionID string
location string
storageEndpointSuffix string
resourceGroup string
cloud *Cloud
}
// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) {
// 1. vmType is standard, return cloud.vmSet directly.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet, nil
}
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
ss, ok := c.cloud.vmSet.(*scaleSet)
if !ok {
return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}
// 3. If the node is managed by availability set, then return ss.availabilitySet.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return nil, err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet, nil
}
// 4. Node is managed by vmss
return ss, nil
}
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) error {
vmset, err := c.getNodeVMSet(nodeName)
if err != nil {
return err
}
instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
if err != nil {
klog.Warningf("failed to get azure instance id (%v)", err)
return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
}
diskOpMutex.LockKey(instanceid)
defer diskOpMutex.UnlockKey(instanceid)
lun, err := c.GetNextDiskLun(nodeName)
if err != nil {
klog.Warningf("no LUN available for instance %q (%v)", nodeName, err)
return fmt.Errorf("all LUNs are used, cannot attach volume (%s, %s) to instance %q (%v)", diskName, diskURI, instanceid, err)
}
klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", diskURI, lun, nodeName)
return vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}
// DetachDisk detaches a disk from host. The vhd can be identified by diskName or diskURI.
func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error {
vmset, err := c.getNodeVMSet(nodeName)
if err != nil {
return err
}
instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
if err != nil {
klog.Warningf("failed to get azure instance id (%v)", err)
return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
}
klog.V(2).Infof("detach %v from node %q", diskURI, nodeName)
// make the lock here as small as possible
diskOpMutex.LockKey(instanceid)
resp, err := vmset.DetachDisk(diskName, diskURI, nodeName)
diskOpMutex.UnlockKey(instanceid)
if c.cloud.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
klog.V(2).Infof("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err)
retryErr := kwait.ExponentialBackoff(c.cloud.requestBackoff(), func() (bool, error) {
diskOpMutex.LockKey(instanceid)
resp, err := vmset.DetachDisk(diskName, diskURI, nodeName)
diskOpMutex.UnlockKey(instanceid)
return c.cloud.processHTTPRetryResponse(nil, "", resp, err)
})
if retryErr != nil {
err = retryErr
klog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err)
}
}
if err != nil {
klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
} else {
klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI)
}
return err
}
// getNodeDataDisks invokes vmSet interfaces to get data disks for the node.
func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
vmset, err := c.getNodeVMSet(nodeName)
if err != nil {
return nil, err
}
return vmset.GetDataDisks(nodeName)
}
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
disks, err := c.getNodeDataDisks(nodeName)
if err != nil {
klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
return -1, err
}
for _, disk := range disks {
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
// found the disk
klog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
return *disk.Lun, nil
}
}
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
}
// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
disks, err := c.getNodeDataDisks(nodeName)
if err != nil {
klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
return -1, err
}
used := make([]bool, maxLUN)
for _, disk := range disks {
if disk.Lun != nil {
used[*disk.Lun] = true
}
}
for k, v := range used {
if !v {
return int32(k), nil
}
}
return -1, fmt.Errorf("all luns are used")
}
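// Illustrative sketch, not part of the original file: with disks already on
// LUNs 0, 1 and 3, the scan above returns 2, the lowest free slot below
// maxLUN, and it only errors once all 64 slots are taken. A hypothetical
// restatement over a plain slice of used LUNs:
func lowestFreeLunSketch(usedLuns []int32) (int32, error) {
	used := make([]bool, maxLUN)
	for _, lun := range usedLuns {
		if lun >= 0 && int(lun) < maxLUN {
			used[lun] = true
		}
	}
	for k, v := range used {
		if !v {
			return int32(k), nil
		}
	}
	return -1, fmt.Errorf("all luns are used")
}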
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
attached := make(map[string]bool)
for _, diskName := range diskNames {
attached[diskName] = false
}
disks, err := c.getNodeDataDisks(nodeName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// if host doesn't exist, no need to detach
klog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
nodeName, diskNames)
return attached, nil
}
return attached, err
}
for _, disk := range disks {
for _, diskName := range diskNames {
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
attached[diskName] = true
}
}
}
return attached, nil
}

View File

@@ -1,66 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
)
func TestAttachDisk(t *testing.T) {
c := getTestCloud()
common := &controllerCommon{
location: c.Location,
storageEndpointSuffix: c.Environment.StorageEndpointSuffix,
resourceGroup: c.ResourceGroup,
subscriptionID: c.SubscriptionID,
cloud: c,
}
diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/disk-name", c.SubscriptionID, c.ResourceGroup)
err := common.AttachDisk(true, "", diskURI, "node1", compute.CachingTypesReadOnly)
if err != nil {
fmt.Printf("TestAttachDisk return expected error: %v", err)
} else {
t.Errorf("TestAttachDisk unexpected nil err")
}
}
func TestDetachDisk(t *testing.T) {
c := getTestCloud()
common := &controllerCommon{
location: c.Location,
storageEndpointSuffix: c.Environment.StorageEndpointSuffix,
resourceGroup: c.ResourceGroup,
subscriptionID: c.SubscriptionID,
cloud: c,
}
diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/disk-name", c.SubscriptionID, c.ResourceGroup)
err := common.DetachDisk("", diskURI, "node1")
if err != nil {
fmt.Printf("TestAttachDisk return expected error: %v", err)
} else {
t.Errorf("TestAttachDisk unexpected nil err")
}
}

View File

@@ -1,165 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"net/http"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/types"
)
// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
vm, err := as.getVirtualMachine(nodeName)
if err != nil {
return err
}
vmName := mapNodeNameToVMName(nodeName)
nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
if err != nil {
return err
}
disks := *vm.StorageProfile.DataDisks
if isManagedDisk {
disks = append(disks,
compute.DataDisk{
Name: &diskName,
Lun: &lun,
Caching: cachingMode,
CreateOption: "attach",
ManagedDisk: &compute.ManagedDiskParameters{
ID: &diskURI,
},
})
} else {
disks = append(disks,
compute.DataDisk{
Name: &diskName,
Vhd: &compute.VirtualHardDisk{
URI: &diskURI,
},
Lun: &lun,
Caching: cachingMode,
CreateOption: "attach",
})
}
newVM := compute.VirtualMachine{
Location: vm.Location,
VirtualMachineProperties: &compute.VirtualMachineProperties{
HardwareProfile: vm.HardwareProfile,
StorageProfile: &compute.StorageProfile{
DataDisks: &disks,
},
},
}
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, vmName, diskName, diskURI)
ctx, cancel := getContextWithCancel()
defer cancel()
// Invalidate the cache right after updating
defer as.cloud.vmCache.Delete(vmName)
_, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
if err != nil {
klog.Errorf("azureDisk - attach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
detail := err.Error()
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
klog.V(2).Infof("azureDisk - err %v, try detach disk(%s, %s)", err, diskName, diskURI)
as.DetachDisk(diskName, diskURI, nodeName)
}
} else {
klog.V(2).Infof("azureDisk - attach disk(%s, %s) succeeded", diskName, diskURI)
}
return err
}
// DetachDisk detaches a disk from host
// the vhd can be identified by diskName or diskURI
func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
vm, err := as.getVirtualMachine(nodeName)
if err != nil {
// if host doesn't exist, no need to detach
klog.Warningf("azureDisk - cannot find node %s, skip detaching disk(%s, %s)", nodeName, diskName, diskURI)
return nil, nil
}
vmName := mapNodeNameToVMName(nodeName)
nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
if err != nil {
return nil, err
}
disks := *vm.StorageProfile.DataDisks
bFoundDisk := false
for i, disk := range disks {
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
// found the disk
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
disks = append(disks[:i], disks[i+1:]...)
bFoundDisk = true
break
}
}
if !bFoundDisk {
return nil, fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
}
newVM := compute.VirtualMachine{
Location: vm.Location,
VirtualMachineProperties: &compute.VirtualMachineProperties{
HardwareProfile: vm.HardwareProfile,
StorageProfile: &compute.StorageProfile{
DataDisks: &disks,
},
},
}
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, vmName, diskName, diskURI)
ctx, cancel := getContextWithCancel()
defer cancel()
// Invalidate the cache right after updating
defer as.cloud.vmCache.Delete(vmName)
return as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
}
// GetDataDisks gets a list of data disks attached to the node.
func (as *availabilitySet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
vm, err := as.getVirtualMachine(nodeName)
if err != nil {
return nil, err
}
if vm.StorageProfile.DataDisks == nil {
return nil, nil
}
return *vm.StorageProfile.DataDisks, nil
}

View File

@@ -1,175 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"net/http"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/types"
)
// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
vmName := mapNodeNameToVMName(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName)
if err != nil {
return err
}
nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
if err != nil {
return err
}
disks := []compute.DataDisk{}
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
disks = *vm.StorageProfile.DataDisks
}
if isManagedDisk {
disks = append(disks,
compute.DataDisk{
Name: &diskName,
Lun: &lun,
Caching: compute.CachingTypes(cachingMode),
CreateOption: "attach",
ManagedDisk: &compute.ManagedDiskParameters{
ID: &diskURI,
},
})
} else {
disks = append(disks,
compute.DataDisk{
Name: &diskName,
Vhd: &compute.VirtualHardDisk{
URI: &diskURI,
},
Lun: &lun,
Caching: compute.CachingTypes(cachingMode),
CreateOption: "attach",
})
}
newVM := compute.VirtualMachineScaleSetVM{
Sku: vm.Sku,
Location: vm.Location,
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
HardwareProfile: vm.HardwareProfile,
StorageProfile: &compute.StorageProfile{
OsDisk: vm.StorageProfile.OsDisk,
DataDisks: &disks,
},
},
}
ctx, cancel := getContextWithCancel()
defer cancel()
// Invalidate the cache right after updating
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
if err != nil {
detail := err.Error()
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
klog.Infof("azureDisk - err %s, try detach disk(%s, %s)", detail, diskName, diskURI)
ss.DetachDisk(diskName, diskURI, nodeName)
}
} else {
klog.V(2).Infof("azureDisk - attach disk(%s, %s) succeeded", diskName, diskURI)
}
return err
}
// DetachDisk detaches a disk from host
// the vhd can be identified by diskName or diskURI
func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
vmName := mapNodeNameToVMName(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName)
if err != nil {
return nil, err
}
nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
if err != nil {
return nil, err
}
disks := []compute.DataDisk{}
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
disks = *vm.StorageProfile.DataDisks
}
bFoundDisk := false
for i, disk := range disks {
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
// found the disk
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
disks = append(disks[:i], disks[i+1:]...)
bFoundDisk = true
break
}
}
if !bFoundDisk {
return nil, fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
}
newVM := compute.VirtualMachineScaleSetVM{
Sku: vm.Sku,
Location: vm.Location,
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
HardwareProfile: vm.HardwareProfile,
StorageProfile: &compute.StorageProfile{
OsDisk: vm.StorageProfile.OsDisk,
DataDisks: &disks,
},
},
}
ctx, cancel := getContextWithCancel()
defer cancel()
// Invalidate the cache right after updating
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
}
// GetDataDisks gets a list of data disks attached to the node.
func (ss *scaleSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
_, _, vm, err := ss.getVmssVM(string(nodeName))
if err != nil {
return nil, err
}
if vm.StorageProfile == nil || vm.StorageProfile.DataDisks == nil {
return nil, nil
}
return *vm.StorageProfile.DataDisks, nil
}

View File

@@ -1,927 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"fmt"
"math/rand"
"net/http"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/to"
)
type fakeAzureLBClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]network.LoadBalancer
}
func newFakeAzureLBClient() *fakeAzureLBClient {
fLBC := &fakeAzureLBClient{}
fLBC.FakeStore = make(map[string]map[string]network.LoadBalancer)
fLBC.mutex = &sync.Mutex{}
return fLBC
}
func (fLBC *fakeAzureLBClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer) (resp *http.Response, err error) {
fLBC.mutex.Lock()
defer fLBC.mutex.Unlock()
if _, ok := fLBC.FakeStore[resourceGroupName]; !ok {
fLBC.FakeStore[resourceGroupName] = make(map[string]network.LoadBalancer)
}
// For dynamic ip allocation, just fill in the PrivateIPAddress
if parameters.FrontendIPConfigurations != nil {
for idx, config := range *parameters.FrontendIPConfigurations {
if config.PrivateIPAllocationMethod == network.Dynamic {
// Here we randomly assign an ip as private ip
// It isn't smart enough to know whether it is in the subnet's range
(*parameters.FrontendIPConfigurations)[idx].PrivateIPAddress = getRandomIPPtr()
}
}
}
fLBC.FakeStore[resourceGroupName][loadBalancerName] = parameters
return nil, nil
}
func (fLBC *fakeAzureLBClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) (resp *http.Response, err error) {
fLBC.mutex.Lock()
defer fLBC.mutex.Unlock()
if rgLBs, ok := fLBC.FakeStore[resourceGroupName]; ok {
if _, ok := rgLBs[loadBalancerName]; ok {
delete(rgLBs, loadBalancerName)
return nil, nil
}
}
return &http.Response{
StatusCode: http.StatusNotFound,
}, nil
}
func (fLBC *fakeAzureLBClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) {
fLBC.mutex.Lock()
defer fLBC.mutex.Unlock()
if _, ok := fLBC.FakeStore[resourceGroupName]; ok {
if entity, ok := fLBC.FakeStore[resourceGroupName][loadBalancerName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such LB",
}
}
func (fLBC *fakeAzureLBClient) List(ctx context.Context, resourceGroupName string) (result []network.LoadBalancer, err error) {
fLBC.mutex.Lock()
defer fLBC.mutex.Unlock()
var value []network.LoadBalancer
if _, ok := fLBC.FakeStore[resourceGroupName]; ok {
for _, v := range fLBC.FakeStore[resourceGroupName] {
value = append(value, v)
}
}
return value, nil
}
type fakeAzurePIPClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]network.PublicIPAddress
SubscriptionID string
}
const publicIPAddressIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/publicIPAddresses/%s"
// returns the full identifier of a publicIPAddress.
func getpublicIPAddressID(subscriptionID string, resourceGroupName, pipName string) string {
return fmt.Sprintf(
publicIPAddressIDTemplate,
subscriptionID,
resourceGroupName,
pipName)
}
func newFakeAzurePIPClient(subscriptionID string) *fakeAzurePIPClient {
fAPC := &fakeAzurePIPClient{}
fAPC.FakeStore = make(map[string]map[string]network.PublicIPAddress)
fAPC.SubscriptionID = subscriptionID
fAPC.mutex = &sync.Mutex{}
return fAPC
}
func (fAPC *fakeAzurePIPClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) (resp *http.Response, err error) {
fAPC.mutex.Lock()
defer fAPC.mutex.Unlock()
if _, ok := fAPC.FakeStore[resourceGroupName]; !ok {
fAPC.FakeStore[resourceGroupName] = make(map[string]network.PublicIPAddress)
}
// assign id
pipID := getpublicIPAddressID(fAPC.SubscriptionID, resourceGroupName, publicIPAddressName)
parameters.ID = &pipID
// only create in the case user has not provided
if parameters.PublicIPAddressPropertiesFormat != nil &&
parameters.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod == network.Static {
// assign ip
parameters.IPAddress = getRandomIPPtr()
}
fAPC.FakeStore[resourceGroupName][publicIPAddressName] = parameters
return nil, nil
}
func (fAPC *fakeAzurePIPClient) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) (resp *http.Response, err error) {
fAPC.mutex.Lock()
defer fAPC.mutex.Unlock()
if rgPIPs, ok := fAPC.FakeStore[resourceGroupName]; ok {
if _, ok := rgPIPs[publicIPAddressName]; ok {
delete(rgPIPs, publicIPAddressName)
return nil, nil
}
}
return &http.Response{
StatusCode: http.StatusNotFound,
}, nil
}
func (fAPC *fakeAzurePIPClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) {
fAPC.mutex.Lock()
defer fAPC.mutex.Unlock()
if _, ok := fAPC.FakeStore[resourceGroupName]; ok {
if entity, ok := fAPC.FakeStore[resourceGroupName][publicIPAddressName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such PIP",
}
}
func (fAPC *fakeAzurePIPClient) List(ctx context.Context, resourceGroupName string) (result []network.PublicIPAddress, err error) {
fAPC.mutex.Lock()
defer fAPC.mutex.Unlock()
var value []network.PublicIPAddress
if _, ok := fAPC.FakeStore[resourceGroupName]; ok {
for _, v := range fAPC.FakeStore[resourceGroupName] {
value = append(value, v)
}
}
return value, nil
}
func (fAPC *fakeAzurePIPClient) setFakeStore(store map[string]map[string]network.PublicIPAddress) {
fAPC.mutex.Lock()
defer fAPC.mutex.Unlock()
fAPC.FakeStore = store
}
type fakeAzureInterfacesClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]network.Interface
}
func newFakeAzureInterfacesClient() *fakeAzureInterfacesClient {
fIC := &fakeAzureInterfacesClient{}
fIC.FakeStore = make(map[string]map[string]network.Interface)
fIC.mutex = &sync.Mutex{}
return fIC
}
func (fIC *fakeAzureInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters network.Interface) (resp *http.Response, err error) {
fIC.mutex.Lock()
defer fIC.mutex.Unlock()
if _, ok := fIC.FakeStore[resourceGroupName]; !ok {
fIC.FakeStore[resourceGroupName] = make(map[string]network.Interface)
}
fIC.FakeStore[resourceGroupName][networkInterfaceName] = parameters
return nil, nil
}
func (fIC *fakeAzureInterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) {
fIC.mutex.Lock()
defer fIC.mutex.Unlock()
if _, ok := fIC.FakeStore[resourceGroupName]; ok {
if entity, ok := fIC.FakeStore[resourceGroupName][networkInterfaceName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such Interface",
}
}
func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
fIC.mutex.Lock()
defer fIC.mutex.Unlock()
if _, ok := fIC.FakeStore[resourceGroupName]; ok {
if entity, ok := fIC.FakeStore[resourceGroupName][networkInterfaceName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such Interface",
}
}
func (fIC *fakeAzureInterfacesClient) setFakeStore(store map[string]map[string]network.Interface) {
fIC.mutex.Lock()
defer fIC.mutex.Unlock()
fIC.FakeStore = store
}
type fakeAzureVirtualMachinesClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]compute.VirtualMachine
}
func newFakeAzureVirtualMachinesClient() *fakeAzureVirtualMachinesClient {
fVMC := &fakeAzureVirtualMachinesClient{}
fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachine)
fVMC.mutex = &sync.Mutex{}
return fVMC
}
func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (resp *http.Response, err error) {
fVMC.mutex.Lock()
defer fVMC.mutex.Unlock()
if _, ok := fVMC.FakeStore[resourceGroupName]; !ok {
fVMC.FakeStore[resourceGroupName] = make(map[string]compute.VirtualMachine)
}
fVMC.FakeStore[resourceGroupName][VMName] = parameters
return nil, nil
}
func (fVMC *fakeAzureVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
fVMC.mutex.Lock()
defer fVMC.mutex.Unlock()
if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
if entity, ok := fVMC.FakeStore[resourceGroupName][VMName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such VM",
}
}
func (fVMC *fakeAzureVirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, err error) {
fVMC.mutex.Lock()
defer fVMC.mutex.Unlock()
result = []compute.VirtualMachine{}
if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
for _, v := range fVMC.FakeStore[resourceGroupName] {
result = append(result, v)
}
}
return result, nil
}
func (fVMC *fakeAzureVirtualMachinesClient) setFakeStore(store map[string]map[string]compute.VirtualMachine) {
fVMC.mutex.Lock()
defer fVMC.mutex.Unlock()
fVMC.FakeStore = store
}
type fakeAzureSubnetsClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]network.Subnet
}
func newFakeAzureSubnetsClient() *fakeAzureSubnetsClient {
fASC := &fakeAzureSubnetsClient{}
fASC.FakeStore = make(map[string]map[string]network.Subnet)
fASC.mutex = &sync.Mutex{}
return fASC
}
func (fASC *fakeAzureSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet) (resp *http.Response, err error) {
fASC.mutex.Lock()
defer fASC.mutex.Unlock()
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
if _, ok := fASC.FakeStore[rgVnet]; !ok {
fASC.FakeStore[rgVnet] = make(map[string]network.Subnet)
}
fASC.FakeStore[rgVnet][subnetName] = subnetParameters
return nil, nil
}
func (fASC *fakeAzureSubnetsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (resp *http.Response, err error) {
fASC.mutex.Lock()
defer fASC.mutex.Unlock()
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
if rgSubnets, ok := fASC.FakeStore[rgVnet]; ok {
if _, ok := rgSubnets[subnetName]; ok {
delete(rgSubnets, subnetName)
return nil, nil
}
}
return &http.Response{
StatusCode: http.StatusNotFound,
}, nil
}
func (fASC *fakeAzureSubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) {
fASC.mutex.Lock()
defer fASC.mutex.Unlock()
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
if _, ok := fASC.FakeStore[rgVnet]; ok {
if entity, ok := fASC.FakeStore[rgVnet][subnetName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such Subnet",
}
}
func (fASC *fakeAzureSubnetsClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string) (result []network.Subnet, err error) {
fASC.mutex.Lock()
defer fASC.mutex.Unlock()
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
var value []network.Subnet
if _, ok := fASC.FakeStore[rgVnet]; ok {
for _, v := range fASC.FakeStore[rgVnet] {
value = append(value, v)
}
}
return value, nil
}
type fakeAzureNSGClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]network.SecurityGroup
}
func newFakeAzureNSGClient() *fakeAzureNSGClient {
fNSG := &fakeAzureNSGClient{}
fNSG.FakeStore = make(map[string]map[string]network.SecurityGroup)
fNSG.mutex = &sync.Mutex{}
return fNSG
}
func (fNSG *fakeAzureNSGClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup) (resp *http.Response, err error) {
fNSG.mutex.Lock()
defer fNSG.mutex.Unlock()
if _, ok := fNSG.FakeStore[resourceGroupName]; !ok {
fNSG.FakeStore[resourceGroupName] = make(map[string]network.SecurityGroup)
}
fNSG.FakeStore[resourceGroupName][networkSecurityGroupName] = parameters
return nil, nil
}
func (fNSG *fakeAzureNSGClient) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (resp *http.Response, err error) {
fNSG.mutex.Lock()
defer fNSG.mutex.Unlock()
if rgSGs, ok := fNSG.FakeStore[resourceGroupName]; ok {
if _, ok := rgSGs[networkSecurityGroupName]; ok {
delete(rgSGs, networkSecurityGroupName)
return nil, nil
}
}
return &http.Response{
StatusCode: http.StatusNotFound,
}, nil
}
func (fNSG *fakeAzureNSGClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) {
fNSG.mutex.Lock()
defer fNSG.mutex.Unlock()
if _, ok := fNSG.FakeStore[resourceGroupName]; ok {
if entity, ok := fNSG.FakeStore[resourceGroupName][networkSecurityGroupName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such NSG",
}
}
func (fNSG *fakeAzureNSGClient) List(ctx context.Context, resourceGroupName string) (result []network.SecurityGroup, err error) {
fNSG.mutex.Lock()
defer fNSG.mutex.Unlock()
var value []network.SecurityGroup
if _, ok := fNSG.FakeStore[resourceGroupName]; ok {
for _, v := range fNSG.FakeStore[resourceGroupName] {
value = append(value, v)
}
}
return value, nil
}
func getRandomIPPtr() *string {
rand.Seed(time.Now().UnixNano())
return to.StringPtr(fmt.Sprintf("%d.%d.%d.%d", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)))
}
type fakeVirtualMachineScaleSetVMsClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]compute.VirtualMachineScaleSetVM
}
func newFakeVirtualMachineScaleSetVMsClient() *fakeVirtualMachineScaleSetVMsClient {
fVMC := &fakeVirtualMachineScaleSetVMsClient{}
fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachineScaleSetVM)
fVMC.mutex = &sync.Mutex{}
return fVMC
}
func (fVMC *fakeVirtualMachineScaleSetVMsClient) setFakeStore(store map[string]map[string]compute.VirtualMachineScaleSetVM) {
fVMC.mutex.Lock()
defer fVMC.mutex.Unlock()
fVMC.FakeStore = store
}
func (fVMC *fakeVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, err error) {
fVMC.mutex.Lock()
defer fVMC.mutex.Unlock()
result = []compute.VirtualMachineScaleSetVM{}
if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
for _, v := range fVMC.FakeStore[resourceGroupName] {
result = append(result, v)
}
}
return result, nil
}
func (fVMC *fakeVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) {
fVMC.mutex.Lock()
defer fVMC.mutex.Unlock()
vmKey := fmt.Sprintf("%s_%s", VMScaleSetName, instanceID)
if scaleSetMap, ok := fVMC.FakeStore[resourceGroupName]; ok {
if entity, ok := scaleSetMap[vmKey]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such VirtualMachineScaleSetVM",
}
}
func (fVMC *fakeVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) {
_, err = fVMC.Get(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
return result, err
}
return result, nil
}
func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (resp *http.Response, err error) {
fVMC.mutex.Lock()
defer fVMC.mutex.Unlock()
vmKey := fmt.Sprintf("%s_%s", VMScaleSetName, instanceID)
if scaleSetMap, ok := fVMC.FakeStore[resourceGroupName]; ok {
if _, ok := scaleSetMap[vmKey]; ok {
scaleSetMap[vmKey] = parameters
}
}
return nil, nil
}
type fakeVirtualMachineScaleSetsClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]compute.VirtualMachineScaleSet
}
func newFakeVirtualMachineScaleSetsClient() *fakeVirtualMachineScaleSetsClient {
fVMSSC := &fakeVirtualMachineScaleSetsClient{}
fVMSSC.FakeStore = make(map[string]map[string]compute.VirtualMachineScaleSet)
fVMSSC.mutex = &sync.Mutex{}
return fVMSSC
}
func (fVMSSC *fakeVirtualMachineScaleSetsClient) setFakeStore(store map[string]map[string]compute.VirtualMachineScaleSet) {
fVMSSC.mutex.Lock()
defer fVMSSC.mutex.Unlock()
fVMSSC.FakeStore = store
}
func (fVMSSC *fakeVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) (resp *http.Response, err error) {
fVMSSC.mutex.Lock()
defer fVMSSC.mutex.Unlock()
if _, ok := fVMSSC.FakeStore[resourceGroupName]; !ok {
fVMSSC.FakeStore[resourceGroupName] = make(map[string]compute.VirtualMachineScaleSet)
}
fVMSSC.FakeStore[resourceGroupName][VMScaleSetName] = parameters
return nil, nil
}
func (fVMSSC *fakeVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) {
fVMSSC.mutex.Lock()
defer fVMSSC.mutex.Unlock()
if scaleSetMap, ok := fVMSSC.FakeStore[resourceGroupName]; ok {
if entity, ok := scaleSetMap[VMScaleSetName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such ScaleSet",
}
}
func (fVMSSC *fakeVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, err error) {
fVMSSC.mutex.Lock()
defer fVMSSC.mutex.Unlock()
result = []compute.VirtualMachineScaleSet{}
if _, ok := fVMSSC.FakeStore[resourceGroupName]; ok {
for _, v := range fVMSSC.FakeStore[resourceGroupName] {
result = append(result, v)
}
}
return result, nil
}
func (fVMSSC *fakeVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) {
return nil, nil
}
type fakeRoutesClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]network.Route
}
func newFakeRoutesClient() *fakeRoutesClient {
fRC := &fakeRoutesClient{}
fRC.FakeStore = make(map[string]map[string]network.Route)
fRC.mutex = &sync.Mutex{}
return fRC
}
func (fRC *fakeRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route) (resp *http.Response, err error) {
fRC.mutex.Lock()
defer fRC.mutex.Unlock()
if _, ok := fRC.FakeStore[routeTableName]; !ok {
fRC.FakeStore[routeTableName] = make(map[string]network.Route)
}
fRC.FakeStore[routeTableName][routeName] = routeParameters
return nil, nil
}
func (fRC *fakeRoutesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (resp *http.Response, err error) {
fRC.mutex.Lock()
defer fRC.mutex.Unlock()
if routes, ok := fRC.FakeStore[routeTableName]; ok {
if _, ok := routes[routeName]; ok {
delete(routes, routeName)
return nil, nil
}
}
return &http.Response{
StatusCode: http.StatusNotFound,
}, nil
}
type fakeRouteTablesClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]network.RouteTable
Calls []string
}
func newFakeRouteTablesClient() *fakeRouteTablesClient {
fRTC := &fakeRouteTablesClient{}
fRTC.FakeStore = make(map[string]map[string]network.RouteTable)
fRTC.mutex = &sync.Mutex{}
return fRTC
}
func (fRTC *fakeRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable) (resp *http.Response, err error) {
fRTC.mutex.Lock()
defer fRTC.mutex.Unlock()
fRTC.Calls = append(fRTC.Calls, "CreateOrUpdate")
if _, ok := fRTC.FakeStore[resourceGroupName]; !ok {
fRTC.FakeStore[resourceGroupName] = make(map[string]network.RouteTable)
}
fRTC.FakeStore[resourceGroupName][routeTableName] = parameters
return nil, nil
}
func (fRTC *fakeRouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) {
fRTC.mutex.Lock()
defer fRTC.mutex.Unlock()
fRTC.Calls = append(fRTC.Calls, "Get")
if _, ok := fRTC.FakeStore[resourceGroupName]; ok {
if entity, ok := fRTC.FakeStore[resourceGroupName][routeTableName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such RouteTable",
}
}
type fakeFileClient struct {
}
func (fFC *fakeFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
return nil
}
func (fFC *fakeFileClient) deleteFileShare(accountName, accountKey, name string) error {
return nil
}
func (fFC *fakeFileClient) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
return nil
}
type fakeStorageAccountClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]storage.Account
Keys storage.AccountListKeysResult
Accounts storage.AccountListResult
Err error
}
func newFakeStorageAccountClient() *fakeStorageAccountClient {
fSAC := &fakeStorageAccountClient{}
fSAC.FakeStore = make(map[string]map[string]storage.Account)
fSAC.mutex = &sync.Mutex{}
return fSAC
}
func (fSAC *fakeStorageAccountClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters storage.AccountCreateParameters) (resp *http.Response, err error) {
fSAC.mutex.Lock()
defer fSAC.mutex.Unlock()
if _, ok := fSAC.FakeStore[resourceGroupName]; !ok {
fSAC.FakeStore[resourceGroupName] = make(map[string]storage.Account)
}
fSAC.FakeStore[resourceGroupName][accountName] = storage.Account{
Name: &accountName,
Sku: parameters.Sku,
Kind: parameters.Kind,
Location: parameters.Location,
Identity: parameters.Identity,
Tags: parameters.Tags,
AccountProperties: &storage.AccountProperties{},
}
return nil, nil
}
func (fSAC *fakeStorageAccountClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
fSAC.mutex.Lock()
defer fSAC.mutex.Unlock()
if rgAccounts, ok := fSAC.FakeStore[resourceGroupName]; ok {
if _, ok := rgAccounts[accountName]; ok {
delete(rgAccounts, accountName)
result.Response = &http.Response{
StatusCode: http.StatusAccepted,
}
return result, nil
}
}
result.Response = &http.Response{
StatusCode: http.StatusNotFound,
}
err = autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such StorageAccount",
}
return result, err
}
func (fSAC *fakeStorageAccountClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) {
return fSAC.Keys, fSAC.Err
}
func (fSAC *fakeStorageAccountClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result storage.AccountListResult, err error) {
return fSAC.Accounts, fSAC.Err
}
func (fSAC *fakeStorageAccountClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string) (result storage.Account, err error) {
fSAC.mutex.Lock()
defer fSAC.mutex.Unlock()
if _, ok := fSAC.FakeStore[resourceGroupName]; ok {
if entity, ok := fSAC.FakeStore[resourceGroupName][accountName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such StorageAccount",
}
}
type fakeDisksClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]compute.Disk
}
func newFakeDisksClient() *fakeDisksClient {
fDC := &fakeDisksClient{}
fDC.FakeStore = make(map[string]map[string]compute.Disk)
fDC.mutex = &sync.Mutex{}
return fDC
}
func (fDC *fakeDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) (resp *http.Response, err error) {
fDC.mutex.Lock()
defer fDC.mutex.Unlock()
if _, ok := fDC.FakeStore[resourceGroupName]; !ok {
fDC.FakeStore[resourceGroupName] = make(map[string]compute.Disk)
}
fDC.FakeStore[resourceGroupName][diskName] = diskParameter
return nil, nil
}
func (fDC *fakeDisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (resp *http.Response, err error) {
fDC.mutex.Lock()
defer fDC.mutex.Unlock()
if rgDisks, ok := fDC.FakeStore[resourceGroupName]; ok {
if _, ok := rgDisks[diskName]; ok {
delete(rgDisks, diskName)
return nil, nil
}
}
return &http.Response{
StatusCode: http.StatusAccepted,
}, nil
}
func (fDC *fakeDisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error) {
fDC.mutex.Lock()
defer fDC.mutex.Unlock()
if _, ok := fDC.FakeStore[resourceGroupName]; ok {
if entity, ok := fDC.FakeStore[resourceGroupName][diskName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "No such Disk",
}
}
type fakeVMSet struct {
NodeToIP map[string]string
Err error
}
func (f *fakeVMSet) GetInstanceIDByNodeName(name string) (string, error) {
return "", fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) GetInstanceTypeByNodeName(name string) (string, error) {
return "", fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) GetIPByNodeName(name string) (string, string, error) {
ip, found := f.NodeToIP[name]
if !found {
return "", "", fmt.Errorf("not found")
}
return ip, "", nil
}
func (f *fakeVMSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
return network.Interface{}, fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
return types.NodeName(""), fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
return cloudprovider.Zone{}, fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) GetPrimaryVMSetName() string {
return ""
}
func (f *fakeVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
return nil, fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
return fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
return fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
return fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
return fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
return nil, fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
return nil, fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) GetPowerStatusByNodeName(name string) (string, error) {
return "", fmt.Errorf("unimplemented")
}

View File

@@ -1,108 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
azs "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest/azure"
"k8s.io/klog"
)
const (
useHTTPS = true
)
// FileClient is the interface for creating file shares; it is defined so that
// tests can inject a fake implementation.
type FileClient interface {
createFileShare(accountName, accountKey, name string, sizeGiB int) error
deleteFileShare(accountName, accountKey, name string) error
resizeFileShare(accountName, accountKey, name string, sizeGiB int) error
}
// createFileShare creates a file share through the configured FileClient.
func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
return az.FileClient.createFileShare(accountName, accountKey, name, sizeGiB)
}
func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error {
return az.FileClient.deleteFileShare(accountName, accountKey, name)
}
func (az *Cloud) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
return az.FileClient.resizeFileShare(accountName, accountKey, name, sizeGiB)
}
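// exampleInjectFakeFileClient is an illustrative sketch (not part of the original
// file): because Cloud depends only on the FileClient interface, a test can swap
// in the fakeFileClient defined in azure_fakes.go. The account name, key, and
// share name below are placeholders.
func exampleInjectFakeFileClient(az *Cloud) error {
	az.FileClient = &fakeFileClient{}
	// All three operations are no-ops on the fake and return nil.
	if err := az.createFileShare("account", "key", "share", 100); err != nil {
		return err
	}
	if err := az.resizeFileShare("account", "key", "share", 200); err != nil {
		return err
	}
	return az.deleteFileShare("account", "key", "share")
}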
type azureFileClient struct {
env azure.Environment
}
func (f *azureFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
fileClient, err := f.getFileSvcClient(accountName, accountKey)
if err != nil {
return err
}
share := fileClient.GetShareReference(name)
share.Properties.Quota = sizeGiB
newlyCreated, err := share.CreateIfNotExists(nil)
if err != nil {
return fmt.Errorf("failed to create file share, err: %v", err)
}
if !newlyCreated {
klog.V(2).Infof("file share(%s) under account(%s) already exists", name, accountName)
}
return nil
}
// deleteFileShare deletes a file share.
func (f *azureFileClient) deleteFileShare(accountName, accountKey, name string) error {
fileClient, err := f.getFileSvcClient(accountName, accountKey)
if err != nil {
return err
}
return fileClient.GetShareReference(name).Delete(nil)
}
func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
fileClient, err := f.getFileSvcClient(accountName, accountKey)
if err != nil {
return err
}
share := fileClient.GetShareReference(name)
if share.Properties.Quota >= sizeGiB {
klog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s",
share.Properties.Quota, sizeGiB, accountName, name)
return nil
}
share.Properties.Quota = sizeGiB
if err = share.SetProperties(nil); err != nil {
return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err)
}
klog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB)
return nil
}
func (f *azureFileClient) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) {
fileClient, err := azs.NewClient(accountName, accountKey, f.env.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
if err != nil {
return nil, fmt.Errorf("error creating azure client: %v", err)
}
fc := fileClient.GetFileService()
return &fc, nil
}

View File

@@ -1,159 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
const (
metadataCacheTTL = time.Minute
metadataCacheKey = "InstanceMetadata"
metadataURL = "http://169.254.169.254/metadata/instance"
)
// NetworkMetadata contains metadata about an instance's network
type NetworkMetadata struct {
Interface []NetworkInterface `json:"interface"`
}
// NetworkInterface represents an instance's network interface.
type NetworkInterface struct {
IPV4 NetworkData `json:"ipv4"`
IPV6 NetworkData `json:"ipv6"`
MAC string `json:"macAddress"`
}
// NetworkData contains IP information for a network.
type NetworkData struct {
IPAddress []IPAddress `json:"ipAddress"`
Subnet []Subnet `json:"subnet"`
}
// IPAddress represents IP address information.
type IPAddress struct {
PrivateIP string `json:"privateIPAddress"`
PublicIP string `json:"publicIPAddress"`
}
// Subnet represents subnet information.
type Subnet struct {
Address string `json:"address"`
Prefix string `json:"prefix"`
}
// ComputeMetadata represents compute information
type ComputeMetadata struct {
SKU string `json:"sku,omitempty"`
Name string `json:"name,omitempty"`
Zone string `json:"zone,omitempty"`
VMSize string `json:"vmSize,omitempty"`
OSType string `json:"osType,omitempty"`
Location string `json:"location,omitempty"`
FaultDomain string `json:"platformFaultDomain,omitempty"`
UpdateDomain string `json:"platformUpdateDomain,omitempty"`
ResourceGroup string `json:"resourceGroupName,omitempty"`
VMScaleSetName string `json:"vmScaleSetName,omitempty"`
}
// InstanceMetadata represents instance information.
type InstanceMetadata struct {
Compute *ComputeMetadata `json:"compute,omitempty"`
Network *NetworkMetadata `json:"network,omitempty"`
}
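// exampleUnmarshalInstanceMetadata is an illustrative sketch (not part of the
// original file): it shows how a minimal IMDS-style JSON document maps onto the
// structs above. The concrete values (VM name, resource group, addresses) are
// made up; the JSON keys follow the struct tags defined in this file.
func exampleUnmarshalInstanceMetadata() (*InstanceMetadata, error) {
	payload := []byte(`{
		"compute": {"name": "vm-0", "resourceGroupName": "rg", "vmSize": "Standard_D2s_v3", "location": "eastus"},
		"network": {"interface": [{"ipv4": {"ipAddress": [{"privateIpAddress": "10.0.0.4", "publicIpAddress": ""}], "subnet": [{"address": "10.0.0.0", "prefix": "24"}]}}]}
	}`)
	metadata := InstanceMetadata{}
	if err := json.Unmarshal(payload, &metadata); err != nil {
		return nil, err
	}
	// metadata.Compute.Name == "vm-0"
	// metadata.Network.Interface[0].IPV4.IPAddress[0].PrivateIP == "10.0.0.4"
	return &metadata, nil
}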
// InstanceMetadataService knows how to query the Azure instance metadata server.
type InstanceMetadataService struct {
metadataURL string
imsCache *timedCache
}
// NewInstanceMetadataService creates an instance of the InstanceMetadataService accessor object.
func NewInstanceMetadataService(metadataURL string) (*InstanceMetadataService, error) {
ims := &InstanceMetadataService{
metadataURL: metadataURL,
}
imsCache, err := newTimedcache(metadataCacheTTL, ims.getInstanceMetadata)
if err != nil {
return nil, err
}
ims.imsCache = imsCache
return ims, nil
}
func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{}, error) {
req, err := http.NewRequest("GET", ims.metadataURL, nil)
if err != nil {
return nil, err
}
req.Header.Add("Metadata", "True")
req.Header.Add("User-Agent", "golang/kubernetes-cloud-provider")
q := req.URL.Query()
q.Add("format", "json")
q.Add("api-version", "2017-12-01")
req.URL.RawQuery = q.Encode()
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("failure of getting instance metadata with response %q", resp.Status)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
obj := InstanceMetadata{}
err = json.Unmarshal(data, &obj)
if err != nil {
return nil, err
}
return &obj, nil
}
// GetMetadata gets instance metadata from cache.
func (ims *InstanceMetadataService) GetMetadata() (*InstanceMetadata, error) {
cache, err := ims.imsCache.Get(metadataCacheKey)
if err != nil {
return nil, err
}
// The cache shouldn't be nil, but keep a check in case something goes wrong.
if cache == nil {
return nil, fmt.Errorf("failure of getting instance metadata")
}
if metadata, ok := cache.(*InstanceMetadata); ok {
return metadata, nil
}
return nil, fmt.Errorf("failure of getting instance metadata")
}

View File

@@ -1,345 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"fmt"
"os"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
const (
vmPowerStatePrefix = "PowerState/"
vmPowerStateStopped = "stopped"
vmPowerStateDeallocated = "deallocated"
)
// NodeAddresses returns the addresses of the specified instance.
func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
// Returns nil for unmanaged nodes because the Azure cloud provider cannot fetch information for them.
unmanaged, err := az.IsNodeUnmanaged(string(name))
if err != nil {
return nil, err
}
if unmanaged {
klog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name)
return nil, nil
}
addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) {
ip, publicIP, err := az.getIPForMachine(nodeName)
if err != nil {
klog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
return nil, err
}
addresses := []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: ip},
{Type: v1.NodeHostName, Address: string(name)},
}
if len(publicIP) > 0 {
addresses = append(addresses, v1.NodeAddress{
Type: v1.NodeExternalIP,
Address: publicIP,
})
}
return addresses, nil
}
if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata()
if err != nil {
return nil, err
}
if metadata.Compute == nil || metadata.Network == nil {
return nil, fmt.Errorf("failure of getting instance metadata")
}
isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
if err != nil {
return nil, err
}
// Not local instance, get addresses from Azure ARM API.
if !isLocalInstance {
return addressGetter(name)
}
if len(metadata.Network.Interface) == 0 {
return nil, fmt.Errorf("no interface is found for the instance")
}
// Use the IP addresses obtained from instance metadata.
ipAddress := metadata.Network.Interface[0]
addresses := []v1.NodeAddress{
{Type: v1.NodeHostName, Address: string(name)},
}
if len(ipAddress.IPV4.IPAddress) > 0 && len(ipAddress.IPV4.IPAddress[0].PrivateIP) > 0 {
address := ipAddress.IPV4.IPAddress[0]
addresses = append(addresses, v1.NodeAddress{
Type: v1.NodeInternalIP,
Address: address.PrivateIP,
})
if len(address.PublicIP) > 0 {
addresses = append(addresses, v1.NodeAddress{
Type: v1.NodeExternalIP,
Address: address.PublicIP,
})
}
}
if len(ipAddress.IPV6.IPAddress) > 0 && len(ipAddress.IPV6.IPAddress[0].PrivateIP) > 0 {
address := ipAddress.IPV6.IPAddress[0]
addresses = append(addresses, v1.NodeAddress{
Type: v1.NodeInternalIP,
Address: address.PrivateIP,
})
if len(address.PublicIP) > 0 {
addresses = append(addresses, v1.NodeAddress{
Type: v1.NodeExternalIP,
Address: address.PublicIP,
})
}
}
if len(addresses) == 1 {
// No IP addresses were returned by the instance metadata service; clean up the cache and report an error.
az.metadata.imsCache.Delete(metadataCacheKey)
return nil, fmt.Errorf("get empty IP addresses from instance metadata service")
}
return addresses, nil
}
return addressGetter(name)
}
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
// Returns nil for unmanaged nodes because the Azure cloud provider cannot fetch information for them.
if az.IsNodeUnmanagedByProviderID(providerID) {
klog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID)
return nil, nil
}
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
return nil, err
}
return az.NodeAddresses(ctx, name)
}
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
// Returns true for unmanaged nodes because the Azure cloud provider always assumes they exist.
if az.IsNodeUnmanagedByProviderID(providerID) {
klog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID)
return true, nil
}
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
if err == cloudprovider.InstanceNotFound {
return false, nil
}
return false, err
}
_, err = az.InstanceID(ctx, name)
if err != nil {
if err == cloudprovider.InstanceNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
return false, err
}
powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName))
if err != nil {
return false, err
}
klog.V(5).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName)
return strings.ToLower(powerStatus) == vmPowerStateStopped || strings.ToLower(powerStatus) == vmPowerStateDeallocated, nil
}
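// exampleIsShutdownPowerState is an illustrative sketch (not part of the original
// file): it mirrors the decision made by InstanceShutdownByProviderID above, which
// treats only the "stopped" and "deallocated" power states (case-insensitive, as
// returned by GetPowerStatusByNodeName) as shut down; "running", "stopping",
// "starting", and "unknown" are not.
func exampleIsShutdownPowerState(powerStatus string) bool {
	state := strings.ToLower(powerStatus)
	return state == vmPowerStateStopped || state == vmPowerStateDeallocated
}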
func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) {
var err error
nodeName := mapNodeNameToVMName(name)
if az.VMType == vmTypeVMSS {
// The VMSS vmName is not the same as the hostname; use the hostname instead.
metadataVMName, err = os.Hostname()
if err != nil {
return false, err
}
}
metadataVMName = strings.ToLower(metadataVMName)
return (metadataVMName == nodeName), err
}
// InstanceID returns the cloud provider ID of the specified instance.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
nodeName := mapNodeNameToVMName(name)
unmanaged, err := az.IsNodeUnmanaged(nodeName)
if err != nil {
return "", err
}
if unmanaged {
// InstanceID is the same as nodeName for unmanaged nodes.
klog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name)
return nodeName, nil
}
if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata()
if err != nil {
return "", err
}
if metadata.Compute == nil {
return "", fmt.Errorf("failure of getting instance metadata")
}
isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
if err != nil {
return "", err
}
// Not local instance, get instanceID from Azure ARM API.
if !isLocalInstance {
return az.vmSet.GetInstanceIDByNodeName(nodeName)
}
// Get resource group name.
resourceGroup := strings.ToLower(metadata.Compute.ResourceGroup)
// Compose instanceID based on nodeName for standard instance.
if az.VMType == vmTypeStandard {
return az.getStandardMachineID(resourceGroup, nodeName), nil
}
// Get scale set name and instanceID from vmName for vmss.
ssName, instanceID, err := extractVmssVMName(metadata.Compute.Name)
if err != nil {
if err == ErrorNotVmssInstance {
// Compose machineID for standard Node.
return az.getStandardMachineID(resourceGroup, nodeName), nil
}
return "", err
}
// Compose instanceID based on ssName and instanceID for vmss instance.
return az.getVmssMachineID(resourceGroup, ssName, instanceID), nil
}
return az.vmSet.GetInstanceIDByNodeName(nodeName)
}
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
if az.IsNodeUnmanagedByProviderID(providerID) {
klog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID)
return "", nil
}
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
return "", err
}
return az.InstanceType(ctx, name)
}
// InstanceType returns the type of the specified instance.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
unmanaged, err := az.IsNodeUnmanaged(string(name))
if err != nil {
return "", err
}
if unmanaged {
klog.V(4).Infof("InstanceType: omitting unmanaged node %q", name)
return "", nil
}
if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata()
if err != nil {
return "", err
}
if metadata.Compute == nil {
return "", fmt.Errorf("failure of getting instance metadata")
}
isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
if err != nil {
return "", err
}
if isLocalInstance {
if metadata.Compute.VMSize != "" {
return metadata.Compute.VMSize, nil
}
}
}
return az.vmSet.GetInstanceTypeByNodeName(string(name))
}
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
func (az *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
return cloudprovider.NotImplemented
}
// CurrentNodeName returns the name of the node we are currently running on.
// On Azure this is the hostname, so we just return the hostname.
func (az *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
return types.NodeName(hostname), nil
}
// mapNodeNameToVMName maps a k8s NodeName to an Azure VM Name
// This is a simple string cast.
func mapNodeNameToVMName(nodeName types.NodeName) string {
return string(nodeName)
}

View File

@@ -1,339 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"fmt"
"net"
"net/http"
"reflect"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)
// setTestVirtualMachines sets test virtual machine with powerstate.
func setTestVirtualMachines(c *Cloud, vmList map[string]string) {
virtualMachineClient := c.VirtualMachinesClient.(*fakeAzureVirtualMachinesClient)
store := map[string]map[string]compute.VirtualMachine{
"rg": make(map[string]compute.VirtualMachine),
}
for nodeName, powerState := range vmList {
instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName)
vm := compute.VirtualMachine{
Name: &nodeName,
ID: &instanceID,
Location: &c.Location,
}
if powerState != "" {
status := []compute.InstanceViewStatus{
{
Code: to.StringPtr(powerState),
},
{
Code: to.StringPtr("ProvisioningState/succeeded"),
},
}
vm.VirtualMachineProperties = &compute.VirtualMachineProperties{
InstanceView: &compute.VirtualMachineInstanceView{
Statuses: &status,
},
}
}
store["rg"][nodeName] = vm
}
virtualMachineClient.setFakeStore(store)
}
func TestInstanceID(t *testing.T) {
cloud := getTestCloud()
testcases := []struct {
name string
vmList []string
nodeName string
metadataName string
expected string
expectError bool
}{
{
name: "InstanceID should get instanceID if node's name is equal to metadataName",
vmList: []string{"vm1"},
nodeName: "vm1",
metadataName: "vm1",
expected: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
},
{
name: "InstanceID should get instanceID from Azure API if node is not local instance",
vmList: []string{"vm2"},
nodeName: "vm2",
metadataName: "vm1",
expected: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2",
},
{
name: "InstanceID should report error if VM doesn't exist",
vmList: []string{"vm1"},
nodeName: "vm3",
expectError: true,
},
}
for _, test := range testcases {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, `{"compute":{"name":"%s"}}`, test.metadataName)
}))
go func() {
http.Serve(listener, mux)
}()
defer listener.Close()
cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
vmListWithPowerState := make(map[string]string)
for _, vm := range test.vmList {
vmListWithPowerState[vm] = ""
}
setTestVirtualMachines(cloud, vmListWithPowerState)
instanceID, err := cloud.InstanceID(context.Background(), types.NodeName(test.nodeName))
if test.expectError {
if err == nil {
t.Errorf("Test [%s] unexpected nil err", test.name)
}
} else {
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
}
if instanceID != test.expected {
t.Errorf("Test [%s] unexpected instanceID: %s, expected %q", test.name, instanceID, test.expected)
}
}
}
func TestInstanceShutdownByProviderID(t *testing.T) {
testcases := []struct {
name string
vmList map[string]string
nodeName string
expected bool
expectError bool
}{
{
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Running status",
vmList: map[string]string{"vm1": "PowerState/Running"},
nodeName: "vm1",
expected: false,
},
{
name: "InstanceShutdownByProviderID should return true if the vm is in PowerState/Deallocated status",
vmList: map[string]string{"vm2": "PowerState/Deallocated"},
nodeName: "vm2",
expected: true,
},
{
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Deallocating status",
vmList: map[string]string{"vm3": "PowerState/Deallocating"},
nodeName: "vm3",
expected: false,
},
{
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Starting status",
vmList: map[string]string{"vm4": "PowerState/Starting"},
nodeName: "vm4",
expected: false,
},
{
name: "InstanceShutdownByProviderID should return true if the vm is in PowerState/Stopped status",
vmList: map[string]string{"vm5": "PowerState/Stopped"},
nodeName: "vm5",
expected: true,
},
{
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Stopping status",
vmList: map[string]string{"vm6": "PowerState/Stopping"},
nodeName: "vm6",
expected: false,
},
{
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Unknown status",
vmList: map[string]string{"vm7": "PowerState/Unknown"},
nodeName: "vm7",
expected: false,
},
{
name: "InstanceShutdownByProviderID should report error if VM doesn't exist",
vmList: map[string]string{"vm1": "PowerState/running"},
nodeName: "vm8",
expectError: true,
},
}
for _, test := range testcases {
cloud := getTestCloud()
setTestVirtualMachines(cloud, test.vmList)
providerID := "azure://" + cloud.getStandardMachineID("rg", test.nodeName)
hasShutdown, err := cloud.InstanceShutdownByProviderID(context.Background(), providerID)
if test.expectError {
if err == nil {
t.Errorf("Test [%s] unexpected nil err", test.name)
}
} else {
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
}
if hasShutdown != test.expected {
t.Errorf("Test [%s] unexpected hasShutdown: %v, expected %v", test.name, hasShutdown, test.expected)
}
}
}
func TestNodeAddresses(t *testing.T) {
cloud := getTestCloud()
cloud.Config.UseInstanceMetadata = true
metadataTemplate := `{"compute":{"name":"%s"},"network":{"interface":[{"ipv4":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]},"ipv6":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]}}]}}`
testcases := []struct {
name string
nodeName string
ipV4 string
ipV6 string
ipV4Public string
ipV6Public string
expected []v1.NodeAddress
expectError bool
}{
{
name: "NodeAddresses should get both ipV4 and ipV6 private addresses",
nodeName: "vm1",
ipV4: "10.240.0.1",
ipV6: "1111:11111:00:00:1111:1111:000:111",
expected: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "vm1",
},
{
Type: v1.NodeInternalIP,
Address: "10.240.0.1",
},
{
Type: v1.NodeInternalIP,
Address: "1111:11111:00:00:1111:1111:000:111",
},
},
},
{
name: "NodeAddresses should report error when IPs are empty",
nodeName: "vm1",
expectError: true,
},
{
name: "NodeAddresses should get ipV4 private and public addresses",
nodeName: "vm1",
ipV4: "10.240.0.1",
ipV4Public: "9.9.9.9",
expected: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "vm1",
},
{
Type: v1.NodeInternalIP,
Address: "10.240.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "9.9.9.9",
},
},
},
{
name: "NodeAddresses should get ipV6 private and public addresses",
nodeName: "vm1",
ipV6: "1111:11111:00:00:1111:1111:000:111",
ipV6Public: "2222:22221:00:00:2222:2222:000:111",
expected: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "vm1",
},
{
Type: v1.NodeInternalIP,
Address: "1111:11111:00:00:1111:1111:000:111",
},
{
Type: v1.NodeExternalIP,
Address: "2222:22221:00:00:2222:2222:000:111",
},
},
},
}
for _, test := range testcases {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, metadataTemplate, test.nodeName, test.ipV4, test.ipV4Public, test.ipV6, test.ipV6Public)
}))
go func() {
http.Serve(listener, mux)
}()
defer listener.Close()
cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
ipAddresses, err := cloud.NodeAddresses(context.Background(), types.NodeName(test.nodeName))
if test.expectError {
if err == nil {
t.Errorf("Test [%s] unexpected nil err", test.name)
}
} else {
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
}
if !reflect.DeepEqual(ipAddresses, test.expected) {
t.Errorf("Test [%s] unexpected ipAddresses: %s, expected %q", test.name, ipAddresses, test.expected)
}
}
}

File diff suppressed because it is too large

View File

@@ -1,370 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"fmt"
"reflect"
"testing"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestFindProbe(t *testing.T) {
tests := []struct {
msg string
existingProbe []network.Probe
curProbe network.Probe
expected bool
}{
{
msg: "empty existing probes should return false",
expected: false,
},
{
msg: "probe names match while ports unmatch should return false",
existingProbe: []network.Probe{
{
Name: to.StringPtr("httpProbe"),
ProbePropertiesFormat: &network.ProbePropertiesFormat{
Port: to.Int32Ptr(1),
},
},
},
curProbe: network.Probe{
Name: to.StringPtr("httpProbe"),
ProbePropertiesFormat: &network.ProbePropertiesFormat{
Port: to.Int32Ptr(2),
},
},
expected: false,
},
{
msg: "probe ports match while names unmatch should return false",
existingProbe: []network.Probe{
{
Name: to.StringPtr("probe1"),
ProbePropertiesFormat: &network.ProbePropertiesFormat{
Port: to.Int32Ptr(1),
},
},
},
curProbe: network.Probe{
Name: to.StringPtr("probe2"),
ProbePropertiesFormat: &network.ProbePropertiesFormat{
Port: to.Int32Ptr(1),
},
},
expected: false,
},
{
msg: "both probe ports and names match should return true",
existingProbe: []network.Probe{
{
Name: to.StringPtr("matchName"),
ProbePropertiesFormat: &network.ProbePropertiesFormat{
Port: to.Int32Ptr(1),
},
},
},
curProbe: network.Probe{
Name: to.StringPtr("matchName"),
ProbePropertiesFormat: &network.ProbePropertiesFormat{
Port: to.Int32Ptr(1),
},
},
expected: true,
},
}
for i, test := range tests {
findResult := findProbe(test.existingProbe, test.curProbe)
assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
}
}
func TestFindRule(t *testing.T) {
tests := []struct {
msg string
existingRule []network.LoadBalancingRule
curRule network.LoadBalancingRule
expected bool
}{
{
msg: "empty existing rules should return false",
expected: false,
},
{
msg: "rule names unmatch should return false",
existingRule: []network.LoadBalancingRule{
{
Name: to.StringPtr("httpProbe1"),
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
FrontendPort: to.Int32Ptr(1),
},
},
},
curRule: network.LoadBalancingRule{
Name: to.StringPtr("httpProbe2"),
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
FrontendPort: to.Int32Ptr(1),
},
},
expected: false,
},
{
msg: "rule names match while frontend ports unmatch should return false",
existingRule: []network.LoadBalancingRule{
{
Name: to.StringPtr("httpProbe"),
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
FrontendPort: to.Int32Ptr(1),
},
},
},
curRule: network.LoadBalancingRule{
Name: to.StringPtr("httpProbe"),
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
FrontendPort: to.Int32Ptr(2),
},
},
expected: false,
},
{
msg: "rule names match while backend ports unmatch should return false",
existingRule: []network.LoadBalancingRule{
{
Name: to.StringPtr("httpProbe"),
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
BackendPort: to.Int32Ptr(1),
},
},
},
curRule: network.LoadBalancingRule{
Name: to.StringPtr("httpProbe"),
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
BackendPort: to.Int32Ptr(2),
},
},
expected: false,
},
{
msg: "rule names match while LoadDistribution unmatch should return false",
existingRule: []network.LoadBalancingRule{
{
Name: to.StringPtr("probe1"),
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
LoadDistribution: network.Default,
},
},
},
curRule: network.LoadBalancingRule{
Name: to.StringPtr("probe2"),
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
LoadDistribution: network.SourceIP,
},
},
expected: false,
},
{
msg: "both rule names and LoadBalancingRulePropertiesFormats match should return true",
existingRule: []network.LoadBalancingRule{
{
Name: to.StringPtr("matchName"),
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
BackendPort: to.Int32Ptr(2),
FrontendPort: to.Int32Ptr(2),
LoadDistribution: network.SourceIP,
},
},
},
curRule: network.LoadBalancingRule{
Name: to.StringPtr("matchName"),
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
BackendPort: to.Int32Ptr(2),
FrontendPort: to.Int32Ptr(2),
LoadDistribution: network.SourceIP,
},
},
expected: true,
},
}
for i, test := range tests {
findResult := findRule(test.existingRule, test.curRule, true)
assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
}
}
func TestGetIdleTimeout(t *testing.T) {
for _, c := range []struct {
desc string
annotations map[string]string
i *int32
err bool
}{
{desc: "no annotation"},
{desc: "annotation empty value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: ""}, err: true},
{desc: "annotation not a number", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "cookies"}, err: true},
{desc: "annotation negative value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "-6"}, err: true},
{desc: "annotation zero value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "0"}, err: true},
{desc: "annotation too low value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "3"}, err: true},
{desc: "annotation too high value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "31"}, err: true},
{desc: "annotation good value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "24"}, i: to.Int32Ptr(24)},
} {
t.Run(c.desc, func(t *testing.T) {
s := &v1.Service{}
s.Annotations = c.annotations
i, err := getIdleTimeout(s)
if !reflect.DeepEqual(c.i, i) {
t.Fatalf("got unexpected value: %d", to.Int32(i))
}
if (err != nil) != c.err {
t.Fatalf("expected error=%v, got %v", c.err, err)
}
})
}
}
func TestSubnet(t *testing.T) {
for i, c := range []struct {
desc string
service *v1.Service
expected *string
}{
{
desc: "No annotation should return nil",
service: &v1.Service{},
expected: nil,
},
{
desc: "annotation with subnet but no ILB should return nil",
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
ServiceAnnotationLoadBalancerInternalSubnet: "subnet",
},
},
},
expected: nil,
},
{
desc: "annotation with subnet but ILB=false should return nil",
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
ServiceAnnotationLoadBalancerInternalSubnet: "subnet",
ServiceAnnotationLoadBalancerInternal: "false",
},
},
},
expected: nil,
},
{
desc: "annotation with empty subnet should return nil",
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
ServiceAnnotationLoadBalancerInternalSubnet: "",
ServiceAnnotationLoadBalancerInternal: "true",
},
},
},
expected: nil,
},
{
desc: "annotation with subnet and ILB should return subnet",
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
ServiceAnnotationLoadBalancerInternalSubnet: "subnet",
ServiceAnnotationLoadBalancerInternal: "true",
},
},
},
expected: to.StringPtr("subnet"),
},
} {
real := subnet(c.service)
assert.Equal(t, c.expected, real, fmt.Sprintf("TestCase[%d]: %s", i, c.desc))
}
}
func TestEnsureLoadBalancerDeleted(t *testing.T) {
const vmCount = 8
const availabilitySetCount = 4
const serviceCount = 9
tests := []struct {
desc string
service v1.Service
expectCreateError bool
}{
{
desc: "external service should be created and deleted successfully",
service: getTestService("test1", v1.ProtocolTCP, 80),
},
{
desc: "internal service should be created and deleted successfully",
service: getInternalTestService("test2", 80),
},
{
desc: "annotated service with same resourceGroup should be created and deleted successfully",
service: getResourceGroupTestService("test3", "rg", "", 80),
},
{
desc: "annotated service with different resourceGroup shouldn't be created but should be deleted successfully",
service: getResourceGroupTestService("test4", "random-rg", "1.2.3.4", 80),
expectCreateError: true,
},
}
az := getTestCloud()
for i, c := range tests {
clusterResources := getClusterResources(az, vmCount, availabilitySetCount)
getTestSecurityGroup(az)
if c.service.Annotations[ServiceAnnotationLoadBalancerInternal] == "true" {
addTestSubnet(t, az, &c.service)
}
// create the service first.
lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &c.service, clusterResources.nodes)
if c.expectCreateError {
assert.NotNil(t, err, "TestCase[%d]: %s", i, c.desc)
} else {
assert.Nil(t, err, "TestCase[%d]: %s", i, c.desc)
assert.NotNil(t, lbStatus, "TestCase[%d]: %s", i, c.desc)
result, err := az.LoadBalancerClient.List(context.TODO(), az.Config.ResourceGroup)
assert.Nil(t, err, "TestCase[%d]: %s", i, c.desc)
assert.Equal(t, len(result), 1, "TestCase[%d]: %s", i, c.desc)
assert.Equal(t, len(*result[0].LoadBalancingRules), 1, "TestCase[%d]: %s", i, c.desc)
}
// finally, delete it.
err = az.EnsureLoadBalancerDeleted(context.TODO(), testClusterName, &c.service)
assert.Nil(t, err, "TestCase[%d]: %s", i, c.desc)
result, err := az.LoadBalancerClient.List(context.Background(), az.Config.ResourceGroup)
assert.Nil(t, err, "TestCase[%d]: %s", i, c.desc)
assert.Equal(t, len(result), 0, "TestCase[%d]: %s", i, c.desc)
}
}

View File

@@ -1,324 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"fmt"
"path"
"strconv"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
kwait "k8s.io/apimachinery/pkg/util/wait"
cloudvolume "k8s.io/cloud-provider/volume"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
)
const (
// default IOPS Caps & Throughput Cap (MBps) per https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-ultra-ssd
defaultDiskIOPSReadWrite = 500
defaultDiskMBpsReadWrite = 100
)
// ManagedDiskController is the controller for Azure managed disks.
type ManagedDiskController struct {
common *controllerCommon
}
// ManagedDiskOptions specifies the options of managed disks.
type ManagedDiskOptions struct {
// The name of the disk.
DiskName string
// The size in GB.
SizeGB int
// The name of PVC.
PVCName string
// The name of resource group.
ResourceGroup string
// The AvailabilityZone to create the disk.
AvailabilityZone string
// The tags of the disk.
Tags map[string]string
// The SKU of storage account.
StorageAccountType compute.DiskStorageAccountTypes
// IOPS Caps for UltraSSD disk
DiskIOPSReadWrite string
// Throughput Cap (MBps) for UltraSSD disk
DiskMBpsReadWrite string
}
// CreateManagedDisk creates a managed disk.
func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (string, error) {
var err error
klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
var createZones *[]string
if len(options.AvailabilityZone) > 0 {
zoneList := []string{c.common.cloud.GetZoneID(options.AvailabilityZone)}
createZones = &zoneList
}
// insert original tags to newTags
newTags := make(map[string]*string)
azureDDTag := "kubernetes-azure-dd"
newTags["created-by"] = &azureDDTag
if options.Tags != nil {
for k, v := range options.Tags {
// Azure won't allow / (forward slash) in tags
newKey := strings.Replace(k, "/", "-", -1)
newValue := strings.Replace(v, "/", "-", -1)
newTags[newKey] = &newValue
}
}
diskSizeGB := int32(options.SizeGB)
diskSku := compute.DiskStorageAccountTypes(options.StorageAccountType)
diskProperties := compute.DiskProperties{
DiskSizeGB: &diskSizeGB,
CreationData: &compute.CreationData{CreateOption: compute.Empty},
}
if diskSku == compute.UltraSSDLRS {
diskIOPSReadWrite := int64(defaultDiskIOPSReadWrite)
if options.DiskIOPSReadWrite != "" {
v, err := strconv.Atoi(options.DiskIOPSReadWrite)
if err != nil {
return "", fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: %v", err)
}
diskIOPSReadWrite = int64(v)
}
diskProperties.DiskIOPSReadWrite = to.Int64Ptr(diskIOPSReadWrite)
diskMBpsReadWrite := int32(defaultDiskMBpsReadWrite)
if options.DiskMBpsReadWrite != "" {
v, err := strconv.Atoi(options.DiskMBpsReadWrite)
if err != nil {
return "", fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: %v", err)
}
diskMBpsReadWrite = int32(v)
}
diskProperties.DiskMBpsReadWrite = to.Int32Ptr(diskMBpsReadWrite)
} else {
if options.DiskIOPSReadWrite != "" {
return "", fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type")
}
if options.DiskMBpsReadWrite != "" {
return "", fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type")
}
}
model := compute.Disk{
Location: &c.common.location,
Tags: newTags,
Zones: createZones,
Sku: &compute.DiskSku{
Name: diskSku,
},
DiskProperties: &diskProperties,
}
if options.ResourceGroup == "" {
options.ResourceGroup = c.common.resourceGroup
}
ctx, cancel := getContextWithCancel()
defer cancel()
_, err = c.common.cloud.DisksClient.CreateOrUpdate(ctx, options.ResourceGroup, options.DiskName, model)
if err != nil {
return "", err
}
diskID := ""
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
provisionState, id, err := c.GetDisk(options.ResourceGroup, options.DiskName)
diskID = id
// We are waiting for provisioningState==Succeeded.
// We don't want to hand off managed disks to k8s while they are
// still being provisioned; this avoids some race conditions.
if err != nil {
return false, err
}
if strings.ToLower(provisionState) == "succeeded" {
return true, nil
}
return false, nil
})
if err != nil {
klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB)
} else {
klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
}
return diskID, nil
}
// DeleteManagedDisk deletes a managed disk.
func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
diskName := path.Base(diskURI)
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
if err != nil {
return err
}
ctx, cancel := getContextWithCancel()
defer cancel()
_, err = c.common.cloud.DisksClient.Delete(ctx, resourceGroup, diskName)
if err != nil {
return err
}
// We don't need poll here, k8s will immediately stop referencing the disk
// the disk will be eventually deleted - cleanly - by ARM
klog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
return nil
}
// GetDisk returns the disk's provisioningState, disk ID and error.
func (c *ManagedDiskController) GetDisk(resourceGroup, diskName string) (string, string, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
if err != nil {
return "", "", err
}
if result.DiskProperties != nil && (*result.DiskProperties).ProvisioningState != nil {
return *(*result.DiskProperties).ProvisioningState, *result.ID, nil
}
return "", "", err
}
// ResizeDisk expands the disk to the requested size.
func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
diskName := path.Base(diskURI)
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
if err != nil {
return oldSize, err
}
result, err := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
if err != nil {
return oldSize, err
}
if result.DiskProperties == nil || result.DiskProperties.DiskSizeGB == nil {
return oldSize, fmt.Errorf("DiskProperties of disk(%s) is nil", diskName)
}
// Azure resizes in chunks of GiB (not GB)
requestGiB := int32(volumehelpers.RoundUpToGiB(newSize))
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))
klog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize)
// If the disk is already of greater or equal size than requested, return without resizing.
if *result.DiskProperties.DiskSizeGB >= requestGiB {
return newSizeQuant, nil
}
result.DiskProperties.DiskSizeGB = &requestGiB
ctx, cancel = getContextWithCancel()
defer cancel()
if _, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, result); err != nil {
return oldSize, err
}
klog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB)
return newSizeQuant, nil
}
// get resource group name from a managed disk URI, e.g. return {group-name} according to
// /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}
// according to https://docs.microsoft.com/en-us/rest/api/compute/disks/get
func getResourceGroupFromDiskURI(diskURI string) (string, error) {
fields := strings.Split(diskURI, "/")
if len(fields) != 9 || fields[3] != "resourceGroups" {
return "", fmt.Errorf("invalid disk URI: %s", diskURI)
}
return fields[4], nil
}
// GetLabelsForVolume implements PVLabeler.GetLabelsForVolume
func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
// Ignore if not AzureDisk.
if pv.Spec.AzureDisk == nil {
return nil, nil
}
// Ignore any volumes that are being provisioned
if pv.Spec.AzureDisk.DiskName == cloudvolume.ProvisionedVolumeName {
return nil, nil
}
return c.GetAzureDiskLabels(pv.Spec.AzureDisk.DataDiskURI)
}
// GetAzureDiskLabels gets the availability zone labels for an AzureDisk.
func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
// Get disk's resource group.
diskName := path.Base(diskURI)
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
if err != nil {
klog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err)
return nil, err
}
// Get information of the disk.
ctx, cancel := getContextWithCancel()
defer cancel()
disk, err := c.DisksClient.Get(ctx, resourceGroup, diskName)
if err != nil {
klog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, err)
return nil, err
}
// Check whether availability zone is specified.
if disk.Zones == nil || len(*disk.Zones) == 0 {
klog.V(4).Infof("Azure disk %q is not zoned", diskName)
return nil, nil
}
zones := *disk.Zones
zoneID, err := strconv.Atoi(zones[0])
if err != nil {
return nil, fmt.Errorf("failed to parse zone %v for AzureDisk %v: %v", zones, diskName, err)
}
zone := c.makeZone(zoneID)
klog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName)
labels := map[string]string{
v1.LabelZoneRegion: c.Location,
v1.LabelZoneFailureDomain: zone,
}
return labels, nil
}

View File

@@ -1,83 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
)
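// apiCallMetrics bundles the Prometheus metrics recorded for Azure API calls:
// a latency histogram and an error counter.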
type apiCallMetrics struct {
latency *prometheus.HistogramVec
errors *prometheus.CounterVec
}
var (
metricLabels = []string{
"request", // API function that is being invoked
"resource_group", // Resource group of the resource being monitored
"subscription_id", // Subscription ID of the resource being monitored
}
apiMetrics = registerAPIMetrics(metricLabels...)
)
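// metricContext carries the start time and label values for a single Azure API call.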
type metricContext struct {
start time.Time
attributes []string
}
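// newMetricContext starts timing an API call and captures its label values:
// the prefixed request name, the lower-cased resource group and the subscription ID.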
func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext {
return &metricContext{
start: time.Now(),
attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID},
}
}
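// Observe records the elapsed latency for the call and, when err is non-nil,
// increments the error counter for the same label values.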
func (mc *metricContext) Observe(err error) {
apiMetrics.latency.WithLabelValues(mc.attributes...).Observe(
time.Since(mc.start).Seconds())
if err != nil {
apiMetrics.errors.WithLabelValues(mc.attributes...).Inc()
}
}
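// registerAPIMetrics creates and registers the latency histogram and error counter
// used to instrument Azure API calls, keyed by the given label names.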
func registerAPIMetrics(attributes ...string) *apiCallMetrics {
metrics := &apiCallMetrics{
latency: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "cloudprovider_azure_api_request_duration_seconds",
Help: "Latency of an Azure API call",
},
attributes,
),
errors: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cloudprovider_azure_api_request_errors",
Help: "Number of errors for an Azure API call",
},
attributes,
),
}
prometheus.MustRegister(metrics.latency)
prometheus.MustRegister(metrics.errors)
return metrics
}

View File

@@ -1,39 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestAzureMetricLabelCardinality(t *testing.T) {
mc := newMetricContext("test", "create", "resource_group", "subscription_id")
assert.Len(t, mc.attributes, len(metricLabels), "cardinalities of labels and values must match")
}
func TestAzureMetricLabelPrefix(t *testing.T) {
mc := newMetricContext("prefix", "request", "resource_group", "subscription_id")
found := false
for _, attribute := range mc.attributes {
if attribute == "prefix_request" {
found = true
}
}
assert.True(t, found, "request label must be prefixed")
}

View File

@@ -1,204 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"fmt"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
// ListRoutes lists all managed routes that belong to the specified clusterName
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
routeTable, existsRouteTable, err := az.getRouteTable()
routes, err := processRoutes(routeTable, existsRouteTable, err)
if err != nil {
return nil, err
}
// Compose routes for unmanaged nodes so that the node controller won't retry creating routes for them.
unmanagedNodes, err := az.GetUnmanagedNodes()
if err != nil {
return nil, err
}
az.routeCIDRsLock.Lock()
defer az.routeCIDRsLock.Unlock()
for _, nodeName := range unmanagedNodes.List() {
if cidr, ok := az.routeCIDRs[nodeName]; ok {
routes = append(routes, &cloudprovider.Route{
Name: nodeName,
TargetNode: mapRouteNameToNodeName(nodeName),
DestinationCIDR: cidr,
})
}
}
return routes, nil
}
// Injectable for testing
func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cloudprovider.Route, error) {
if err != nil {
return nil, err
}
if !exists {
return []*cloudprovider.Route{}, nil
}
var kubeRoutes []*cloudprovider.Route
if routeTable.RouteTablePropertiesFormat != nil && routeTable.Routes != nil {
kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Routes))
for i, route := range *routeTable.Routes {
instance := mapRouteNameToNodeName(*route.Name)
cidr := *route.AddressPrefix
klog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)
kubeRoutes[i] = &cloudprovider.Route{
Name: *route.Name,
TargetNode: instance,
DestinationCIDR: cidr,
}
}
}
klog.V(10).Info("ListRoutes: FINISH")
return kubeRoutes, nil
}
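// createRouteTableIfNotExists creates the configured route table if it does not exist yet;
// it is a no-op when the table is already present.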
func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
if _, existsRouteTable, err := az.getRouteTable(); err != nil {
klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return err
} else if existsRouteTable {
return nil
}
return az.createRouteTable()
}
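// createRouteTable creates an empty route table with the configured name and
// invalidates the route table cache afterwards.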
func (az *Cloud) createRouteTable() error {
routeTable := network.RouteTable{
Name: to.StringPtr(az.RouteTableName),
Location: to.StringPtr(az.Location),
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
}
klog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
err := az.CreateOrUpdateRouteTable(routeTable)
if err != nil {
return err
}
// Invalidate the cache right after updating
az.rtCache.Delete(az.RouteTableName)
return nil
}
// CreateRoute creates the described managed route
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
// Return early for unmanaged nodes because the Azure cloud provider cannot fetch information for them.
nodeName := string(kubeRoute.TargetNode)
unmanaged, err := az.IsNodeUnmanaged(nodeName)
if err != nil {
return err
}
if unmanaged {
klog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
az.routeCIDRsLock.Lock()
defer az.routeCIDRsLock.Unlock()
az.routeCIDRs[nodeName] = kubeRoute.DestinationCIDR
return nil
}
klog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
return err
}
targetIP, _, err := az.getIPForMachine(kubeRoute.TargetNode)
if err != nil {
return err
}
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
route := network.Route{
Name: to.StringPtr(routeName),
RoutePropertiesFormat: &network.RoutePropertiesFormat{
AddressPrefix: to.StringPtr(kubeRoute.DestinationCIDR),
NextHopType: network.RouteNextHopTypeVirtualAppliance,
NextHopIPAddress: to.StringPtr(targetIP),
},
}
klog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
err = az.CreateOrUpdateRoute(route)
if err != nil {
return err
}
klog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return nil
}
// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
// Return early for unmanaged nodes because the Azure cloud provider cannot fetch information for them.
nodeName := string(kubeRoute.TargetNode)
unmanaged, err := az.IsNodeUnmanaged(nodeName)
if err != nil {
return err
}
if unmanaged {
klog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
az.routeCIDRsLock.Lock()
defer az.routeCIDRsLock.Unlock()
delete(az.routeCIDRs, nodeName)
return nil
}
klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
err = az.DeleteRouteWithName(routeName)
if err != nil {
return err
}
klog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return nil
}
// This must be kept in sync with mapRouteNameToNodeName.
// These two functions enable stashing the instance name in the route
// and then retrieving it later when listing. This is needed because
// Azure does not let you put tags/descriptions on the Route itself.
func mapNodeNameToRouteName(nodeName types.NodeName) string {
return fmt.Sprintf("%s", nodeName)
}
// Used with mapNodeNameToRouteName. See comment on mapNodeNameToRouteName.
func mapRouteNameToNodeName(routeName string) types.NodeName {
return types.NodeName(fmt.Sprintf("%s", routeName))
}

View File

@@ -1,392 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"fmt"
"reflect"
"testing"
"k8s.io/apimachinery/pkg/util/sets"
cloudprovider "k8s.io/cloud-provider"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
)
func TestDeleteRoute(t *testing.T) {
fakeRoutes := newFakeRoutesClient()
cloud := &Cloud{
RoutesClient: fakeRoutes,
Config: Config{
RouteTableResourceGroup: "foo",
RouteTableName: "bar",
Location: "location",
},
unmanagedNodes: sets.NewString(),
nodeInformerSynced: func() bool { return true },
}
route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
routeName := mapNodeNameToRouteName(route.TargetNode)
fakeRoutes.FakeStore = map[string]map[string]network.Route{
cloud.RouteTableName: {
routeName: {},
},
}
err := cloud.DeleteRoute(context.TODO(), "cluster", &route)
if err != nil {
t.Errorf("unexpected error deleting route: %v", err)
t.FailNow()
}
mp, found := fakeRoutes.FakeStore[cloud.RouteTableName]
if !found {
t.Errorf("unexpected missing item for %s", cloud.RouteTableName)
t.FailNow()
}
ob, found := mp[routeName]
if found {
t.Errorf("unexpectedly found: %v that should have been deleted.", ob)
t.FailNow()
}
// test delete route for unmanaged nodes.
nodeName := "node1"
nodeCIDR := "4.3.2.1/24"
cloud.unmanagedNodes.Insert(nodeName)
cloud.routeCIDRs = map[string]string{
nodeName: nodeCIDR,
}
route1 := cloudprovider.Route{
TargetNode: mapRouteNameToNodeName(nodeName),
DestinationCIDR: nodeCIDR,
}
err = cloud.DeleteRoute(context.TODO(), "cluster", &route1)
if err != nil {
t.Errorf("unexpected error deleting route: %v", err)
t.FailNow()
}
cidr, found := cloud.routeCIDRs[nodeName]
if found {
t.Errorf("unexpected CIDR item (%q) for %s", cidr, nodeName)
}
}
func TestCreateRoute(t *testing.T) {
fakeTable := newFakeRouteTablesClient()
fakeVM := &fakeVMSet{}
fakeRoutes := newFakeRoutesClient()
cloud := &Cloud{
RouteTablesClient: fakeTable,
RoutesClient: fakeRoutes,
vmSet: fakeVM,
Config: Config{
RouteTableResourceGroup: "foo",
RouteTableName: "bar",
Location: "location",
},
unmanagedNodes: sets.NewString(),
nodeInformerSynced: func() bool { return true },
}
cache, _ := cloud.newRouteTableCache()
cloud.rtCache = cache
expectedTable := network.RouteTable{
Name: &cloud.RouteTableName,
Location: &cloud.Location,
}
fakeTable.FakeStore = map[string]map[string]network.RouteTable{}
fakeTable.FakeStore[cloud.RouteTableResourceGroup] = map[string]network.RouteTable{
cloud.RouteTableName: expectedTable,
}
route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
nodeIP := "2.4.6.8"
fakeVM.NodeToIP = map[string]string{
"node": nodeIP,
}
err := cloud.CreateRoute(context.TODO(), "cluster", "unused", &route)
if err != nil {
t.Errorf("unexpected error create if not exists route table: %v", err)
t.FailNow()
}
if len(fakeTable.Calls) != 1 || fakeTable.Calls[0] != "Get" {
t.Errorf("unexpected calls create if not exists, exists: %v", fakeTable.Calls)
}
routeName := mapNodeNameToRouteName(route.TargetNode)
routeInfo, found := fakeRoutes.FakeStore[cloud.RouteTableName][routeName]
if !found {
t.Errorf("could not find route: %v in %v", routeName, fakeRoutes.FakeStore)
t.FailNow()
}
if *routeInfo.AddressPrefix != route.DestinationCIDR {
t.Errorf("Expected cidr: %s, saw %s", *routeInfo.AddressPrefix, route.DestinationCIDR)
}
if routeInfo.NextHopType != network.RouteNextHopTypeVirtualAppliance {
t.Errorf("Expected next hop: %v, saw %v", network.RouteNextHopTypeVirtualAppliance, routeInfo.NextHopType)
}
if *routeInfo.NextHopIPAddress != nodeIP {
t.Errorf("Expected IP address: %s, saw %s", nodeIP, *routeInfo.NextHopIPAddress)
}
// test create route for unmanaged nodes.
nodeName := "node1"
nodeCIDR := "4.3.2.1/24"
cloud.unmanagedNodes.Insert(nodeName)
cloud.routeCIDRs = map[string]string{}
route1 := cloudprovider.Route{
TargetNode: mapRouteNameToNodeName(nodeName),
DestinationCIDR: nodeCIDR,
}
err = cloud.CreateRoute(context.TODO(), "cluster", "unused", &route1)
if err != nil {
t.Errorf("unexpected error creating route: %v", err)
t.FailNow()
}
cidr, found := cloud.routeCIDRs[nodeName]
if !found {
t.Errorf("unexpected missing item for %s", nodeName)
t.FailNow()
}
if cidr != nodeCIDR {
t.Errorf("unexpected cidr %s, saw %s", nodeCIDR, cidr)
}
}
func TestCreateRouteTableIfNotExists_Exists(t *testing.T) {
fake := newFakeRouteTablesClient()
cloud := &Cloud{
RouteTablesClient: fake,
Config: Config{
RouteTableResourceGroup: "foo",
RouteTableName: "bar",
Location: "location",
},
}
cache, _ := cloud.newRouteTableCache()
cloud.rtCache = cache
expectedTable := network.RouteTable{
Name: &cloud.RouteTableName,
Location: &cloud.Location,
}
fake.FakeStore = map[string]map[string]network.RouteTable{}
fake.FakeStore[cloud.RouteTableResourceGroup] = map[string]network.RouteTable{
cloud.RouteTableName: expectedTable,
}
err := cloud.createRouteTableIfNotExists("clusterName", &cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/16"})
if err != nil {
t.Errorf("unexpected error create if not exists route table: %v", err)
t.FailNow()
}
if len(fake.Calls) != 1 || fake.Calls[0] != "Get" {
t.Errorf("unexpected calls create if not exists, exists: %v", fake.Calls)
}
}
func TestCreateRouteTableIfNotExists_NotExists(t *testing.T) {
fake := newFakeRouteTablesClient()
cloud := &Cloud{
RouteTablesClient: fake,
Config: Config{
RouteTableResourceGroup: "foo",
RouteTableName: "bar",
Location: "location",
},
}
cache, _ := cloud.newRouteTableCache()
cloud.rtCache = cache
expectedTable := network.RouteTable{
Name: &cloud.RouteTableName,
Location: &cloud.Location,
}
err := cloud.createRouteTableIfNotExists("clusterName", &cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/16"})
if err != nil {
t.Errorf("unexpected error create if not exists route table: %v", err)
t.FailNow()
}
table := fake.FakeStore[cloud.RouteTableResourceGroup][cloud.RouteTableName]
if *table.Location != *expectedTable.Location {
t.Errorf("mismatch: %s vs %s", *table.Location, *expectedTable.Location)
}
if *table.Name != *expectedTable.Name {
t.Errorf("mismatch: %s vs %s", *table.Name, *expectedTable.Name)
}
if len(fake.Calls) != 2 || fake.Calls[0] != "Get" || fake.Calls[1] != "CreateOrUpdate" {
t.Errorf("unexpected calls create if not exists, exists: %v", fake.Calls)
}
}
func TestCreateRouteTable(t *testing.T) {
fake := newFakeRouteTablesClient()
cloud := &Cloud{
RouteTablesClient: fake,
Config: Config{
RouteTableResourceGroup: "foo",
RouteTableName: "bar",
Location: "location",
},
}
cache, _ := cloud.newRouteTableCache()
cloud.rtCache = cache
expectedTable := network.RouteTable{
Name: &cloud.RouteTableName,
Location: &cloud.Location,
}
err := cloud.createRouteTable()
if err != nil {
t.Errorf("unexpected error in creating route table: %v", err)
t.FailNow()
}
table := fake.FakeStore["foo"]["bar"]
if *table.Location != *expectedTable.Location {
t.Errorf("mismatch: %s vs %s", *table.Location, *expectedTable.Location)
}
if *table.Name != *expectedTable.Name {
t.Errorf("mismatch: %s vs %s", *table.Name, *expectedTable.Name)
}
}
func TestProcessRoutes(t *testing.T) {
tests := []struct {
rt network.RouteTable
exists bool
err error
expectErr bool
expectedError string
expectedRoute []cloudprovider.Route
name string
}{
{
err: fmt.Errorf("test error"),
expectErr: true,
expectedError: "test error",
},
{
exists: false,
name: "doesn't exist",
},
{
rt: network.RouteTable{},
exists: true,
name: "nil routes",
},
{
rt: network.RouteTable{
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
},
exists: true,
name: "no routes",
},
{
rt: network.RouteTable{
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
Routes: &[]network.Route{
{
Name: to.StringPtr("name"),
RoutePropertiesFormat: &network.RoutePropertiesFormat{
AddressPrefix: to.StringPtr("1.2.3.4/16"),
},
},
},
},
},
exists: true,
expectedRoute: []cloudprovider.Route{
{
Name: "name",
TargetNode: mapRouteNameToNodeName("name"),
DestinationCIDR: "1.2.3.4/16",
},
},
name: "one route",
},
{
rt: network.RouteTable{
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
Routes: &[]network.Route{
{
Name: to.StringPtr("name"),
RoutePropertiesFormat: &network.RoutePropertiesFormat{
AddressPrefix: to.StringPtr("1.2.3.4/16"),
},
},
{
Name: to.StringPtr("name2"),
RoutePropertiesFormat: &network.RoutePropertiesFormat{
AddressPrefix: to.StringPtr("5.6.7.8/16"),
},
},
},
},
},
exists: true,
expectedRoute: []cloudprovider.Route{
{
Name: "name",
TargetNode: mapRouteNameToNodeName("name"),
DestinationCIDR: "1.2.3.4/16",
},
{
Name: "name2",
TargetNode: mapRouteNameToNodeName("name2"),
DestinationCIDR: "5.6.7.8/16",
},
},
name: "more routes",
},
}
for _, test := range tests {
routes, err := processRoutes(test.rt, test.exists, test.err)
if test.expectErr {
if err == nil {
t.Errorf("%s: unexpected non-error", test.name)
continue
}
if err.Error() != test.expectedError {
t.Errorf("%s: Expected error: %v, saw error: %v", test.name, test.expectedError, err.Error())
continue
}
}
if !test.expectErr && err != nil {
t.Errorf("%s; unexpected error: %v", test.name, err)
continue
}
if len(routes) != len(test.expectedRoute) {
t.Errorf("%s: Unexpected difference: %#v vs %#v", test.name, routes, test.expectedRoute)
continue
}
for ix := range test.expectedRoute {
if !reflect.DeepEqual(test.expectedRoute[ix], *routes[ix]) {
t.Errorf("%s: Unexpected difference: %#v vs %#v", test.name, test.expectedRoute[ix], *routes[ix])
}
}
}
}

View File

@@ -1,750 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"errors"
"fmt"
"hash/crc32"
"regexp"
"sort"
"strconv"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
const (
loadBalancerMinimumPriority = 500
loadBalancerMaximumPriority = 4096
machineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s"
availabilitySetIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/availabilitySets/%s"
frontendIPConfigIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/%s"
backendPoolIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s"
loadBalancerProbeIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s"
// InternalLoadBalancerNameSuffix is the suffix appended to internal load balancer names
InternalLoadBalancerNameSuffix = "-internal"
// nodeLabelRole specifies the role of a node
nodeLabelRole = "kubernetes.io/role"
nicFailedState = "Failed"
storageAccountNameMaxLength = 24
)
var errNotInVMSet = errors.New("vm is not in the vmset")
var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)
var nicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`)
var publicIPResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/publicIPAddresses/(?:.*)`)
// getStandardMachineID returns the full identifier of a virtual machine.
func (az *Cloud) getStandardMachineID(resourceGroup, machineName string) string {
return fmt.Sprintf(
machineIDTemplate,
az.SubscriptionID,
strings.ToLower(resourceGroup),
machineName)
}
// returns the full identifier of an availabilitySet
func (az *Cloud) getAvailabilitySetID(resourceGroup, availabilitySetName string) string {
return fmt.Sprintf(
availabilitySetIDTemplate,
az.SubscriptionID,
resourceGroup,
availabilitySetName)
}
// returns the full identifier of a loadbalancer frontendipconfiguration.
func (az *Cloud) getFrontendIPConfigID(lbName, fipConfigName string) string {
return fmt.Sprintf(
frontendIPConfigIDTemplate,
az.SubscriptionID,
az.ResourceGroup,
lbName,
fipConfigName)
}
// returns the full identifier of a loadbalancer backendpool.
func (az *Cloud) getBackendPoolID(lbName, backendPoolName string) string {
return fmt.Sprintf(
backendPoolIDTemplate,
az.SubscriptionID,
az.ResourceGroup,
lbName,
backendPoolName)
}
// returns the full identifier of a loadbalancer probe.
func (az *Cloud) getLoadBalancerProbeID(lbName, lbRuleName string) string {
return fmt.Sprintf(
loadBalancerProbeIDTemplate,
az.SubscriptionID,
az.ResourceGroup,
lbName,
lbRuleName)
}
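// mapLoadBalancerNameToVMSet maps a load balancer name back to its VM set name,
// falling back to the primary VM set when the trimmed name matches the cluster name.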
func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) {
vmSetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix)
if strings.EqualFold(clusterName, vmSetName) {
vmSetName = az.vmSet.GetPrimaryVMSetName()
}
return vmSetName
}
// For a load balancer, all frontend IP configurations should reference either a subnet or a publicIPAddress.
// Azure therefore does not allow mixed-type (public and internal) load balancers,
// so we use a separate name for the internal load balancer.
// This is the name of the Azure LoadBalancer resource.
func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
lbNamePrefix := vmSetName
if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
lbNamePrefix = clusterName
}
if isInternal {
return fmt.Sprintf("%s%s", lbNamePrefix, InternalLoadBalancerNameSuffix)
}
return lbNamePrefix
}
// isMasterNode returns true if the node has a master role label.
// The master role is determined by looking for:
// * a kubernetes.io/role="master" label
func isMasterNode(node *v1.Node) bool {
if val, ok := node.Labels[nodeLabelRole]; ok && val == "master" {
return true
}
return false
}
// returns the deepest child's identifier from a full identifier string.
func getLastSegment(ID string) (string, error) {
parts := strings.Split(ID, "/")
name := parts[len(parts)-1]
if len(name) == 0 {
return "", fmt.Errorf("resource name was missing from identifier")
}
return name, nil
}
// returns the equivalent LoadBalancerRule, SecurityRule and LoadBalancerProbe
// protocol types for the given Kubernetes protocol type.
func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.TransportProtocol, *network.SecurityRuleProtocol, *network.ProbeProtocol, error) {
var transportProto network.TransportProtocol
var securityProto network.SecurityRuleProtocol
var probeProto network.ProbeProtocol
switch protocol {
case v1.ProtocolTCP:
transportProto = network.TransportProtocolTCP
securityProto = network.SecurityRuleProtocolTCP
probeProto = network.ProbeProtocolTCP
return &transportProto, &securityProto, &probeProto, nil
case v1.ProtocolUDP:
transportProto = network.TransportProtocolUDP
securityProto = network.SecurityRuleProtocolUDP
return &transportProto, &securityProto, nil, nil
default:
return &transportProto, &securityProto, &probeProto, fmt.Errorf("only TCP and UDP are supported for Azure LoadBalancers")
}
}
// This returns the full identifier of the primary NIC for the given VM.
func getPrimaryInterfaceID(machine compute.VirtualMachine) (string, error) {
if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
}
for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
if *ref.Primary {
return *ref.ID, nil
}
}
return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
}
func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguration, error) {
if nic.IPConfigurations == nil {
return nil, fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", *nic.Name)
}
if len(*nic.IPConfigurations) == 1 {
return &((*nic.IPConfigurations)[0]), nil
}
for _, ref := range *nic.IPConfigurations {
if *ref.Primary {
return &ref, nil
}
}
return nil, fmt.Errorf("failed to determine the primary ipconfig. nicname=%q", *nic.Name)
}
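// isInternalLoadBalancer reports whether the load balancer name carries the internal suffix.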
func isInternalLoadBalancer(lb *network.LoadBalancer) bool {
return strings.HasSuffix(*lb.Name, InternalLoadBalancerNameSuffix)
}
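// getBackendPoolName returns the backend pool name for the cluster, which is currently the cluster name itself.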
func getBackendPoolName(clusterName string) string {
return clusterName
}
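// getLoadBalancerRuleName builds the load balancer rule name from the service's rule prefix,
// an optional subnet name, the protocol and the port.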
func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, protocol v1.Protocol, port int32, subnetName *string) string {
prefix := az.getRulePrefix(service)
if subnetName == nil {
return fmt.Sprintf("%s-%s-%d", prefix, protocol, port)
}
return fmt.Sprintf("%s-%s-%s-%d", prefix, *subnetName, protocol, port)
}
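// getSecurityRuleName builds the network security rule name for a service port and source address prefix;
// shared rules use a "shared-" prefix instead of the per-service rule prefix.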
func (az *Cloud) getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
if useSharedSecurityRule(service) {
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
return fmt.Sprintf("shared-%s-%d-%s", port.Protocol, port.Port, safePrefix)
}
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
rulePrefix := az.getRulePrefix(service)
return fmt.Sprintf("%s-%s-%d-%s", rulePrefix, port.Protocol, port.Port, safePrefix)
}
// This returns a human-readable version of the Service, used to tag some resources.
// It exists only for human-readable convenience and is not used for filtering.
func getServiceName(service *v1.Service) string {
return fmt.Sprintf("%s/%s", service.Namespace, service.Name)
}
// This returns a prefix for loadbalancer/security rules.
func (az *Cloud) getRulePrefix(service *v1.Service) string {
return az.GetLoadBalancerName(context.TODO(), "", service)
}
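// getPublicIPName returns the public IP resource name for the given cluster and service.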
func (az *Cloud) getPublicIPName(clusterName string, service *v1.Service) string {
return fmt.Sprintf("%s-%s", clusterName, az.GetLoadBalancerName(context.TODO(), clusterName, service))
}
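// serviceOwnsRule reports whether the named rule was created for the given service,
// based on a case-insensitive match against the service's rule prefix.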
func (az *Cloud) serviceOwnsRule(service *v1.Service, rule string) bool {
prefix := az.getRulePrefix(service)
return strings.HasPrefix(strings.ToUpper(rule), strings.ToUpper(prefix))
}
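// serviceOwnsFrontendIP reports whether the frontend IP configuration belongs to the given service,
// based on the service's load balancer name prefix.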
func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) bool {
baseName := az.GetLoadBalancerName(context.TODO(), "", service)
return strings.HasPrefix(*fip.Name, baseName)
}
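// getFrontendIPConfigName returns the frontend IP configuration name for the service,
// suffixed with the subnet name when one is provided.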
func (az *Cloud) getFrontendIPConfigName(service *v1.Service, subnetName *string) string {
baseName := az.GetLoadBalancerName(context.TODO(), "", service)
if subnetName != nil {
return fmt.Sprintf("%s-%s", baseName, *subnetName)
}
return baseName
}
// This returns the next available rule priority level for a given set of security rules.
func getNextAvailablePriority(rules []network.SecurityRule) (int32, error) {
var smallest int32 = loadBalancerMinimumPriority
var spread int32 = 1
outer:
for smallest < loadBalancerMaximumPriority {
for _, rule := range rules {
if *rule.Priority == smallest {
smallest += spread
continue outer
}
}
// no one else had it
return smallest, nil
}
return -1, fmt.Errorf("securityGroup priorities are exhausted")
}
var polyTable = crc32.MakeTable(crc32.Koopman)
// MakeCRC32 converts a string to its CRC32 checksum, formatted as a decimal string.
func MakeCRC32(str string) string {
crc := crc32.New(polyTable)
crc.Write([]byte(str))
hash := crc.Sum32()
return strconv.FormatUint(uint64(hash), 10)
}
// availabilitySet implements VMSet interface for Azure availability sets.
type availabilitySet struct {
*Cloud
}
// newAvailabilitySet creates a new availabilitySet.
func newAvailabilitySet(az *Cloud) VMSet {
return &availabilitySet{
Cloud: az,
}
}
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
// not exist or is no longer running.
func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) {
var machine compute.VirtualMachine
var err error
machine, err = as.getVirtualMachine(types.NodeName(name))
if err == cloudprovider.InstanceNotFound {
return "", cloudprovider.InstanceNotFound
}
if err != nil {
if as.CloudProviderBackoff {
klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
if err != nil {
klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
return "", err
}
} else {
return "", err
}
}
resourceID := *machine.ID
convertedResourceID, err := convertResourceGroupNameToLower(resourceID)
if err != nil {
klog.Errorf("convertResourceGroupNameToLower failed with error: %v", err)
return "", err
}
return convertedResourceID, nil
}
// GetPowerStatusByNodeName returns the power state of the specified node.
func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
vm, err := as.getVirtualMachine(types.NodeName(name))
if err != nil {
return powerState, err
}
if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
statuses := *vm.InstanceView.Statuses
for _, status := range statuses {
state := to.String(status.Code)
if strings.HasPrefix(state, vmPowerStatePrefix) {
return strings.TrimPrefix(state, vmPowerStatePrefix), nil
}
}
}
return "", fmt.Errorf("failed to get power status for node %q", name)
}
// GetNodeNameByProviderID gets the node name by provider ID.
func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
// NodeName is part of providerID for standard instances.
matches := providerIDRE.FindStringSubmatch(providerID)
if len(matches) != 2 {
return "", errors.New("error splitting providerID")
}
return types.NodeName(matches[1]), nil
}
// GetInstanceTypeByNodeName gets the instance type by node name.
func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
machine, err := as.getVirtualMachine(types.NodeName(name))
if err != nil {
klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
return "", err
}
return string(machine.HardwareProfile.VMSize), nil
}
// GetZoneByNodeName gets the availability zone for the specified node. If the node is not running
// in an availability zone, it returns the fault domain instead.
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
vm, err := as.getVirtualMachine(types.NodeName(name))
if err != nil {
return cloudprovider.Zone{}, err
}
var failureDomain string
if vm.Zones != nil && len(*vm.Zones) > 0 {
// Get availability zone for the node.
zones := *vm.Zones
zoneID, err := strconv.Atoi(zones[0])
if err != nil {
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %v", zones, err)
}
failureDomain = as.makeZone(zoneID)
} else {
// Availability zone is not used for the node, falling back to fault domain.
failureDomain = strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
}
zone := cloudprovider.Zone{
FailureDomain: failureDomain,
Region: *(vm.Location),
}
return zone, nil
}
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
func (as *availabilitySet) GetPrimaryVMSetName() string {
return as.Config.PrimaryAvailabilitySetName
}
// GetIPByNodeName gets machine private IP and public IP by node name.
func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) {
nic, err := as.GetPrimaryInterface(name)
if err != nil {
return "", "", err
}
ipConfig, err := getPrimaryIPConfig(nic)
if err != nil {
klog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
return "", "", err
}
privateIP := *ipConfig.PrivateIPAddress
publicIP := ""
if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
pipID := *ipConfig.PublicIPAddress.ID
pipName, err := getLastSegment(pipID)
if err != nil {
return "", "", fmt.Errorf("failed to publicIP name for node %q with pipID %q", name, pipID)
}
pip, existsPip, err := as.getPublicIPAddress(as.ResourceGroup, pipName)
if err != nil {
return "", "", err
}
if existsPip {
publicIP = *pip.IPAddress
}
}
return privateIP, publicIP, nil
}
// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
// a list of availability sets that match the nodes available to k8s.
func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
vms, err := as.ListVirtualMachines(as.ResourceGroup)
if err != nil {
klog.Errorf("as.getNodeAvailabilitySet - ListVirtualMachines failed, err=%v", err)
return nil, err
}
vmNameToAvailabilitySetID := make(map[string]string, len(vms))
for vmx := range vms {
vm := vms[vmx]
if vm.AvailabilitySet != nil {
vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID
}
}
availabilitySetIDs := sets.NewString()
agentPoolAvailabilitySets = &[]string{}
for nx := range nodes {
nodeName := (*nodes[nx]).Name
if isMasterNode(nodes[nx]) {
continue
}
asID, ok := vmNameToAvailabilitySetID[nodeName]
if !ok {
klog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
}
if availabilitySetIDs.Has(asID) {
// already added in the list
continue
}
asName, err := getLastSegment(asID)
if err != nil {
klog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
return nil, err
}
// The availability set ID is currently upper-cased in a non-deterministic way;
// keep it lower-cased until the ID casing is fixed.
asName = strings.ToLower(asName)
*agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName)
}
return agentPoolAvailabilitySets, nil
}
// GetVMSetNames selects all possible availability sets or scale sets
// (depending on the configured vmType) for the service load balancer. If the service has
// no load balancer mode annotation, it returns the primary VMSet. If a service annotation
// for the load balancer exists, it returns the eligible VMSets.
func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service)
if !hasMode {
// no mode specified in service annotation default to PrimaryAvailabilitySetName
availabilitySetNames = &[]string{as.Config.PrimaryAvailabilitySetName}
return availabilitySetNames, nil
}
availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes)
if err != nil {
klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
return nil, err
}
if len(*availabilitySetNames) == 0 {
klog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
}
// sort the list to have deterministic selection
sort.Strings(*availabilitySetNames)
if !isAuto {
if serviceAvailabilitySetNames == nil || len(serviceAvailabilitySetNames) == 0 {
return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
}
// validate availability set exists
for sasx := range serviceAvailabilitySetNames {
found := false
for asx := range *availabilitySetNames {
if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetNames[sasx]) {
found = true
serviceAvailabilitySetNames[sasx] = (*availabilitySetNames)[asx]
break
}
}
if !found {
klog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx])
}
}
availabilitySetNames = &serviceAvailabilitySetNames
}
return availabilitySetNames, nil
}
// GetPrimaryInterface gets machine primary network interface by node name.
func (as *availabilitySet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
return as.getPrimaryInterfaceWithVMSet(nodeName, "")
}
// extractResourceGroupByNicID extracts the resource group name by nicID.
func extractResourceGroupByNicID(nicID string) (string, error) {
matches := nicResourceGroupRE.FindStringSubmatch(nicID)
if len(matches) != 2 {
return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
}
return matches[1], nil
}
// getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet.
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
var machine compute.VirtualMachine
machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName))
if err != nil {
klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
return network.Interface{}, err
}
primaryNicID, err := getPrimaryInterfaceID(machine)
if err != nil {
return network.Interface{}, err
}
nicName, err := getLastSegment(primaryNicID)
if err != nil {
return network.Interface{}, err
}
nodeResourceGroup, err := as.GetNodeResourceGroup(nodeName)
if err != nil {
return network.Interface{}, err
}
// Check the availability set name. Note that vmSetName is an empty string when getting
// the Node's IP address. When vmSetName is not empty, it should be checked against the
// Node's real availability set name:
// - For basic SKU load balancer, errNotInVMSet should be returned if the node's
// availability set is mismatched with vmSetName.
// - For standard SKU load balancer, backend could belong to multiple VMAS, so we
// don't check vmSet for it.
if vmSetName != "" && !as.useStandardLoadBalancer() {
expectedAvailabilitySetName := as.getAvailabilitySetID(nodeResourceGroup, vmSetName)
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
klog.V(3).Infof(
"GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName)
return network.Interface{}, errNotInVMSet
}
}
nicResourceGroup, err := extractResourceGroupByNicID(primaryNicID)
if err != nil {
return network.Interface{}, err
}
ctx, cancel := getContextWithCancel()
defer cancel()
nic, err := as.InterfacesClient.Get(ctx, nicResourceGroup, nicName, "")
if err != nil {
return network.Interface{}, err
}
return nic, nil
}
// EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool.
func (as *availabilitySet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
vmName := mapNodeNameToVMName(nodeName)
serviceName := getServiceName(service)
nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName)
if err != nil {
if err == errNotInVMSet {
klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
return nil
}
klog.Errorf("error: az.EnsureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
return err
}
if nic.ProvisioningState != nil && *nic.ProvisioningState == nicFailedState {
klog.Warningf("EnsureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name)
return nil
}
var primaryIPConfig *network.InterfaceIPConfiguration
primaryIPConfig, err = getPrimaryIPConfig(nic)
if err != nil {
return err
}
foundPool := false
newBackendPools := []network.BackendAddressPool{}
if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools
}
for _, existingPool := range newBackendPools {
if strings.EqualFold(backendPoolID, *existingPool.ID) {
foundPool = true
break
}
}
if !foundPool {
if as.useStandardLoadBalancer() && len(newBackendPools) > 0 {
// Although standard load balancer supports backends from multiple availability
// sets, the same network interface couldn't be added to more than one load balancer of
// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
// about this.
newBackendPoolsIDs := make([]string, 0, len(newBackendPools))
for _, pool := range newBackendPools {
if pool.ID != nil {
newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID)
}
}
isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs)
if err != nil {
return err
}
if !isSameLB {
klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName)
return nil
}
}
newBackendPools = append(newBackendPools,
network.BackendAddressPool{
ID: to.StringPtr(backendPoolID),
})
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
nicName := *nic.Name
klog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
err := as.CreateOrUpdateInterface(service, nic)
if err != nil {
return err
}
}
return nil
}
// EnsureHostsInPool ensures the given nodes' primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
hostUpdates := make([]func() error, 0, len(nodes))
for _, node := range nodes {
localNodeName := node.Name
if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
continue
}
if as.ShouldNodeExcludedFromLoadBalancer(node) {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
continue
}
f := func() error {
err := as.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
if err != nil {
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err)
}
return nil
}
hostUpdates = append(hostUpdates, f)
}
errs := utilerrors.AggregateGoroutines(hostUpdates...)
if errs != nil {
return utilerrors.Flatten(errs)
}
return nil
}
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
// Do nothing for availability set.
return nil
}
// generateStorageAccountName generates a storage account name from the given prefix and a UUID-based suffix.
func generateStorageAccountName(accountNamePrefix string) string {
uniqueID := strings.Replace(string(uuid.NewUUID()), "-", "", -1)
accountName := strings.ToLower(accountNamePrefix + uniqueID)
if len(accountName) > storageAccountNameMaxLength {
return accountName[:storageAccountNameMaxLength-1]
}
return accountName
}
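// Illustrative sketch (not part of the original file): generateStorageAccountName
// lowercases the prefix, appends a dash-stripped UUID, and truncates the result so
// it stays within storageAccountNameMaxLength (Azure storage account names are
// capped at 24 characters). A hypothetical call would look like:
//
//	name := generateStorageAccountName(sharedDiskAccountNamePrefix)
//	// e.g. "ds" + "3f2504e04f8911d39a0c0305e82c3301", truncated to fit the limit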

View File

@@ -1,253 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestIsMasterNode(t *testing.T) {
if isMasterNode(&v1.Node{}) {
t.Errorf("Empty node should not be master!")
}
if isMasterNode(&v1.Node{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
nodeLabelRole: "worker",
},
},
}) {
t.Errorf("Node labelled 'worker' should not be master!")
}
if !isMasterNode(&v1.Node{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
nodeLabelRole: "master",
},
},
}) {
t.Errorf("Node should be master!")
}
}
func TestGetLastSegment(t *testing.T) {
tests := []struct {
ID string
expected string
expectErr bool
}{
{
ID: "",
expected: "",
expectErr: true,
},
{
ID: "foo/",
expected: "",
expectErr: true,
},
{
ID: "foo/bar",
expected: "bar",
expectErr: false,
},
{
ID: "foo/bar/baz",
expected: "baz",
expectErr: false,
},
}
for _, test := range tests {
s, e := getLastSegment(test.ID)
if test.expectErr && e == nil {
t.Errorf("Expected err, but it was nil")
continue
}
if !test.expectErr && e != nil {
t.Errorf("Unexpected error: %v", e)
continue
}
if s != test.expected {
t.Errorf("expected: %s, got %s", test.expected, s)
}
}
}
func TestGenerateStorageAccountName(t *testing.T) {
tests := []struct {
prefix string
}{
{
prefix: "",
},
{
prefix: "pvc",
},
{
prefix: "1234512345123451234512345",
},
}
for _, test := range tests {
accountName := generateStorageAccountName(test.prefix)
if len(accountName) > storageAccountNameMaxLength || len(accountName) < 3 {
t.Errorf("input prefix: %s, output account name: %s, length not in [3,%d]", test.prefix, accountName, storageAccountNameMaxLength)
}
for _, char := range accountName {
if (char < 'a' || char > 'z') && (char < '0' || char > '9') {
t.Errorf("input prefix: %s, output account name: %s, there is non-digit or non-letter(%q)", test.prefix, accountName, char)
break
}
}
}
}
func TestMapLoadBalancerNameToVMSet(t *testing.T) {
az := getTestCloud()
az.PrimaryAvailabilitySetName = "primary"
cases := []struct {
description string
lbName string
useStandardLB bool
clusterName string
expectedVMSet string
}{
{
description: "default external LB should map to primary vmset",
lbName: "azure",
clusterName: "azure",
expectedVMSet: "primary",
},
{
description: "default internal LB should map to primary vmset",
lbName: "azure-internal",
clusterName: "azure",
expectedVMSet: "primary",
},
{
description: "non-default external LB should map to its own vmset",
lbName: "azuretest",
clusterName: "azure",
expectedVMSet: "azuretest",
},
{
description: "non-default internal LB should map to its own vmset",
lbName: "azuretest-internal",
clusterName: "azure",
expectedVMSet: "azuretest",
},
}
for _, c := range cases {
if c.useStandardLB {
az.Config.LoadBalancerSku = loadBalancerSkuStandard
} else {
az.Config.LoadBalancerSku = loadBalancerSkuBasic
}
vmset := az.mapLoadBalancerNameToVMSet(c.lbName, c.clusterName)
assert.Equal(t, c.expectedVMSet, vmset, c.description)
}
}
func TestGetAzureLoadBalancerName(t *testing.T) {
az := getTestCloud()
az.PrimaryAvailabilitySetName = "primary"
cases := []struct {
description string
vmSet string
isInternal bool
useStandardLB bool
clusterName string
expected string
}{
{
description: "default external LB should get primary vmset",
vmSet: "primary",
clusterName: "azure",
expected: "azure",
},
{
description: "default internal LB should get primary vmset",
vmSet: "primary",
clusterName: "azure",
isInternal: true,
expected: "azure-internal",
},
{
description: "non-default external LB should get its own vmset",
vmSet: "as",
clusterName: "azure",
expected: "as",
},
{
description: "non-default internal LB should get its own vmset",
vmSet: "as",
clusterName: "azure",
isInternal: true,
expected: "as-internal",
},
{
description: "default standard external LB should get cluster name",
vmSet: "primary",
useStandardLB: true,
clusterName: "azure",
expected: "azure",
},
{
description: "default standard internal LB should get cluster name",
vmSet: "primary",
useStandardLB: true,
isInternal: true,
clusterName: "azure",
expected: "azure-internal",
},
{
description: "non-default standard external LB should get cluster-name",
vmSet: "as",
useStandardLB: true,
clusterName: "azure",
expected: "azure",
},
{
description: "non-default standard internal LB should get cluster-name",
vmSet: "as",
useStandardLB: true,
isInternal: true,
clusterName: "azure",
expected: "azure-internal",
},
}
for _, c := range cases {
if c.useStandardLB {
az.Config.LoadBalancerSku = loadBalancerSkuStandard
} else {
az.Config.LoadBalancerSku = loadBalancerSkuBasic
}
loadbalancerName := az.getAzureLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
assert.Equal(t, c.expected, loadbalancerName, c.description)
}
}

View File

@@ -1,65 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"k8s.io/klog"
)
const (
defaultStorageAccountType = string(storage.StandardLRS)
defaultStorageAccountKind = storage.StorageV2
fileShareAccountNamePrefix = "f"
sharedDiskAccountNamePrefix = "ds"
dedicatedDiskAccountNamePrefix = "dd"
)
// CreateFileShare creates a file share, using a matching storage account type, account kind, etc.
// A storage account is created if the specified account is not found.
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, accountKind, resourceGroup, location string, requestGiB int) (string, string, error) {
if resourceGroup == "" {
resourceGroup = az.resourceGroup
}
account, key, err := az.EnsureStorageAccount(accountName, accountType, accountKind, resourceGroup, location, fileShareAccountNamePrefix)
if err != nil {
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
if err := az.createFileShare(account, key, shareName, requestGiB); err != nil {
return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, account, err)
}
klog.V(4).Infof("created share %s in account %s", shareName, account)
return account, key, nil
}
// DeleteFileShare deletes a file share using storage account name and key
func (az *Cloud) DeleteFileShare(accountName, accountKey, shareName string) error {
if err := az.deleteFileShare(accountName, accountKey, shareName); err != nil {
return err
}
klog.V(4).Infof("share %s deleted", shareName)
return nil
}
// ResizeFileShare resizes a file share
func (az *Cloud) ResizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
return az.resizeFileShare(accountName, accountKey, name, sizeGiB)
}
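// Illustrative sketch (not part of the original file), with hypothetical arguments:
//
//	account, key, err := az.CreateFileShare("share1", "", "Standard_LRS", "StorageV2", "rg1", "eastus", 100)
//	// creates (or reuses) a storage account and a 100 GiB file share in it
//	err = az.ResizeFileShare(account, key, "share1", 200)
//	// grows the share to 200 GiB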

View File

@@ -1,142 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"testing"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)
func TestCreateFileShare(t *testing.T) {
cloud := &Cloud{}
fake := newFakeStorageAccountClient()
cloud.StorageAccountClient = fake
cloud.FileClient = &fakeFileClient{}
name := "baz"
sku := "sku"
kind := "StorageV2"
location := "centralus"
value := "foo key"
bogus := "bogus"
tests := []struct {
name string
acct string
acctType string
acctKind string
loc string
gb int
accounts storage.AccountListResult
keys storage.AccountListKeysResult
err error
expectErr bool
expectAcct string
expectKey string
}{
{
name: "foo",
acct: "bar",
acctType: "type",
acctKind: "StorageV2",
loc: "eastus",
gb: 10,
expectErr: true,
},
{
name: "foo",
acct: "",
acctType: "type",
acctKind: "StorageV2",
loc: "eastus",
gb: 10,
expectErr: true,
},
{
name: "foo",
acct: "",
acctType: sku,
acctKind: kind,
loc: location,
gb: 10,
accounts: storage.AccountListResult{
Value: &[]storage.Account{
{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Kind: storage.Kind(kind), Location: &location},
},
},
keys: storage.AccountListKeysResult{
Keys: &[]storage.AccountKey{
{Value: &value},
},
},
expectAcct: "baz",
expectKey: "key",
},
{
name: "foo",
acct: "",
acctType: sku,
acctKind: kind,
loc: location,
gb: 10,
accounts: storage.AccountListResult{
Value: &[]storage.Account{
{Name: &bogus, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &location},
},
},
expectErr: true,
},
{
name: "foo",
acct: "",
acctType: sku,
acctKind: kind,
loc: location,
gb: 10,
accounts: storage.AccountListResult{
Value: &[]storage.Account{
{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &bogus},
},
},
expectErr: true,
},
}
for _, test := range tests {
fake.Accounts = test.accounts
fake.Keys = test.keys
fake.Err = test.err
account, key, err := cloud.CreateFileShare(test.name, test.acct, test.acctType, test.acctKind, "rg", test.loc, test.gb)
if test.expectErr && err == nil {
t.Errorf("unexpected non-error")
continue
}
if !test.expectErr && err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if test.expectAcct != account {
t.Errorf("Expected: %s, got %s", test.expectAcct, account)
}
if test.expectKey != key {
t.Errorf("Expected: %s, got %s", test.expectKey, key)
}
}
}

View File

@@ -1,146 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"strings"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/klog"
)
type accountWithLocation struct {
Name, StorageType, Location string
}
// getStorageAccounts gets the name, type and location of all storage accounts in a resource group that match matchingAccountType, matchingAccountKind and matchingLocation.
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingAccountKind, resourceGroup, matchingLocation string) ([]accountWithLocation, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := az.StorageAccountClient.ListByResourceGroup(ctx, resourceGroup)
if err != nil {
return nil, err
}
if result.Value == nil {
return nil, fmt.Errorf("unexpected error when listing storage accounts from resource group %s", resourceGroup)
}
accounts := []accountWithLocation{}
for _, acct := range *result.Value {
if acct.Name != nil && acct.Location != nil && acct.Sku != nil {
storageType := string((*acct.Sku).Name)
if matchingAccountType != "" && !strings.EqualFold(matchingAccountType, storageType) {
continue
}
if matchingAccountKind != "" && !strings.EqualFold(matchingAccountKind, string(acct.Kind)) {
continue
}
location := *acct.Location
if matchingLocation != "" && !strings.EqualFold(matchingLocation, location) {
continue
}
accounts = append(accounts, accountWithLocation{Name: *acct.Name, StorageType: storageType, Location: location})
}
}
return accounts, nil
}
// GetStorageAccesskey gets the storage account access key
func (az *Cloud) GetStorageAccesskey(account, resourceGroup string) (string, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := az.StorageAccountClient.ListKeys(ctx, resourceGroup, account)
if err != nil {
return "", err
}
if result.Keys == nil {
return "", fmt.Errorf("empty keys")
}
for _, k := range *result.Keys {
if k.Value != nil && *k.Value != "" {
v := *k.Value
if ind := strings.LastIndex(v, " "); ind >= 0 {
v = v[(ind + 1):]
}
return v, nil
}
}
return "", fmt.Errorf("no valid keys")
}
// EnsureStorageAccount searches for a matching storage account, creates one (named with genAccountNamePrefix) if none is found, and returns the accountName and accountKey.
func (az *Cloud) EnsureStorageAccount(accountName, accountType, accountKind, resourceGroup, location, genAccountNamePrefix string) (string, string, error) {
if len(accountName) == 0 {
// find a storage account that matches accountType
accounts, err := az.getStorageAccounts(accountType, accountKind, resourceGroup, location)
if err != nil {
return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
}
if len(accounts) > 0 {
accountName = accounts[0].Name
klog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
}
if len(accountName) == 0 {
// no matching account was found; create a new account in the current resource group
accountName = generateStorageAccountName(genAccountNamePrefix)
if location == "" {
location = az.Location
}
if accountType == "" {
accountType = defaultStorageAccountType
}
// use StorageV2 by default per https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
kind := defaultStorageAccountKind
if accountKind != "" {
kind = storage.Kind(accountKind)
}
klog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s",
accountName, resourceGroup, location, accountType, kind)
cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
Kind: kind,
AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{EnableHTTPSTrafficOnly: to.BoolPtr(true)},
Tags: map[string]*string{"created-by": to.StringPtr("azure")},
Location: &location}
ctx, cancel := getContextWithCancel()
defer cancel()
_, err := az.StorageAccountClient.Create(ctx, resourceGroup, accountName, cp)
if err != nil {
return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
}
}
}
// find the access key with this account
accountKey, err := az.GetStorageAccesskey(accountName, resourceGroup)
if err != nil {
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
return accountName, accountKey, nil
}
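// Illustrative sketch (not part of the original file), with hypothetical arguments:
// passing an empty accountName asks EnsureStorageAccount to reuse a matching account
// or create one named with the given prefix.
//
//	account, key, err := az.EnsureStorageAccount("", "Standard_LRS", "StorageV2", "rg1", "eastus", fileShareAccountNamePrefix)
//	// account is an existing or newly created account; key is its access key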

View File

@@ -1,80 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"testing"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)
func TestGetStorageAccessKeys(t *testing.T) {
cloud := &Cloud{}
fake := newFakeStorageAccountClient()
cloud.StorageAccountClient = fake
value := "foo bar"
tests := []struct {
results storage.AccountListKeysResult
expectedKey string
expectErr bool
err error
}{
{storage.AccountListKeysResult{}, "", true, nil},
{
storage.AccountListKeysResult{
Keys: &[]storage.AccountKey{
{Value: &value},
},
},
"bar",
false,
nil,
},
{
storage.AccountListKeysResult{
Keys: &[]storage.AccountKey{
{},
{Value: &value},
},
},
"bar",
false,
nil,
},
{storage.AccountListKeysResult{}, "", true, fmt.Errorf("test error")},
}
for _, test := range tests {
expectedKey := test.expectedKey
fake.Keys = test.results
fake.Err = test.err
key, err := cloud.GetStorageAccesskey("acct", "rg")
if test.expectErr && err == nil {
t.Errorf("Unexpected non-error")
continue
}
if !test.expectErr && err != nil {
t.Errorf("Unexpected error: %v", err)
continue
}
if key != expectedKey {
t.Errorf("expected: %s, saw %s", expectedKey, key)
}
}
}

File diff suppressed because it is too large

View File

@@ -1,75 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
)
// VMSet defines the functions that all vmsets (including scale sets and
// availability sets) should implement.
type VMSet interface {
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
// not exist or is no longer running.
GetInstanceIDByNodeName(name string) (string, error)
// GetInstanceTypeByNodeName gets the instance type by node name.
GetInstanceTypeByNodeName(name string) (string, error)
// GetIPByNodeName gets machine private IP and public IP by node name.
GetIPByNodeName(name string) (string, string, error)
// GetPrimaryInterface gets machine primary network interface by node name.
GetPrimaryInterface(nodeName string) (network.Interface, error)
// GetNodeNameByProviderID gets the node name by provider ID.
GetNodeNameByProviderID(providerID string) (types.NodeName, error)
// GetZoneByNodeName gets cloudprovider.Zone by node name.
GetZoneByNodeName(name string) (cloudprovider.Zone, error)
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
GetPrimaryVMSetName() string
// GetVMSetNames selects all possible availability sets or scale sets
// (depending on the configured vmType) for the service load balancer. If the
// service has no load balancer mode annotation, it returns the primary VMSet;
// otherwise it returns the eligible VMSets named by the annotation.
GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
// EnsureHostsInPool ensures the given nodes' primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
// EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool.
EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
// AttachDisk attaches a vhd to a vm. The vhd must exist and can be identified by diskName, diskURI and lun.
AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
// DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI.
DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error)
// GetDataDisks gets a list of data disks attached to the node.
GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error)
// GetPowerStatusByNodeName returns the power state of the specified node.
GetPowerStatusByNodeName(name string) (string, error)
}
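// A hedged compile-time sketch (not part of the original file): both implementations
// in this package are expected to satisfy VMSet, which could be asserted as
//
//	var _ VMSet = (*availabilitySet)(nil)
//	var _ VMSet = (*scaleSet)(nil)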

View File

@@ -1,943 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"errors"
"fmt"
"regexp"
"sort"
"strconv"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
var (
// ErrorNotVmssInstance indicates an instance does not belong to any vmss.
ErrorNotVmssInstance = errors.New("not a vmss instance")
scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
resourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines(?:.*)`)
vmssNicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines/(?:.*)/networkInterfaces/(?:.*)`)
vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
vmssIPConfigurationRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(.+)/networkInterfaces(?:.*)`)
)
// scaleSet implements VMSet interface for Azure scale set.
type scaleSet struct {
*Cloud
// availabilitySet is also required for scaleSet because some instances
// (e.g. master nodes) may not belong to any scale sets.
availabilitySet VMSet
vmssCache *timedCache
vmssVMCache *timedCache
nodeNameToScaleSetMappingCache *timedCache
availabilitySetNodesCache *timedCache
}
// newScaleSet creates a new scaleSet.
func newScaleSet(az *Cloud) (VMSet, error) {
var err error
ss := &scaleSet{
Cloud: az,
availabilitySet: newAvailabilitySet(az),
}
ss.nodeNameToScaleSetMappingCache, err = ss.newNodeNameToScaleSetMappingCache()
if err != nil {
return nil, err
}
ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache()
if err != nil {
return nil, err
}
ss.vmssCache, err = ss.newVmssCache()
if err != nil {
return nil, err
}
ss.vmssVMCache, err = ss.newVmssVMCache()
if err != nil {
return nil, err
}
return ss, nil
}
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm compute.VirtualMachineScaleSetVM, err error) {
instanceID, err = getScaleSetVMInstanceID(nodeName)
if err != nil {
return ssName, instanceID, vm, err
}
ssName, err = ss.getScaleSetNameByNodeName(nodeName)
if err != nil {
return ssName, instanceID, vm, err
}
if ssName == "" {
return "", "", vm, cloudprovider.InstanceNotFound
}
resourceGroup, err := ss.GetNodeResourceGroup(nodeName)
if err != nil {
return "", "", vm, err
}
klog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
key := buildVmssCacheKey(resourceGroup, ss.makeVmssVMName(ssName, instanceID))
cachedVM, err := ss.vmssVMCache.Get(key)
if err != nil {
return ssName, instanceID, vm, err
}
if cachedVM == nil {
klog.Errorf("Can't find node (%q) in any scale sets", nodeName)
return ssName, instanceID, vm, cloudprovider.InstanceNotFound
}
return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
}
// GetPowerStatusByNodeName returns the power state of the specified node.
func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
_, _, vm, err := ss.getVmssVM(name)
if err != nil {
return powerState, err
}
if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
statuses := *vm.InstanceView.Statuses
for _, status := range statuses {
state := to.String(status.Code)
if strings.HasPrefix(state, vmPowerStatePrefix) {
return strings.TrimPrefix(state, vmPowerStatePrefix), nil
}
}
}
return "", fmt.Errorf("failed to get power status for node %q", name)
}
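// Illustrative sketch (not part of the original file), assuming vmPowerStatePrefix
// is "PowerState/": an instance view status code such as "PowerState/running" is
// reported as "running", while a code like "ProvisioningState/succeeded" is ignored
// because it does not carry the power-state prefix.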
// getVmssVMByInstanceID gets the scale set VM info from cache.
// The node must belong to one of the scale sets.
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
vmName := ss.makeVmssVMName(scaleSetName, instanceID)
key := buildVmssCacheKey(resourceGroup, vmName)
cachedVM, err := ss.vmssVMCache.Get(key)
if err != nil {
return vm, err
}
if cachedVM == nil {
klog.Errorf("couldn't find vmss virtual machine by scaleSetName (%s) and instanceID (%s)", scaleSetName, instanceID)
return vm, cloudprovider.InstanceNotFound
}
return *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
}
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
// not exist or is no longer running.
func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return "", err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.GetInstanceIDByNodeName(name)
}
_, _, vm, err := ss.getVmssVM(name)
if err != nil {
return "", err
}
resourceID := *vm.ID
convertedResourceID, err := convertResourceGroupNameToLower(resourceID)
if err != nil {
klog.Errorf("convertResourceGroupNameToLower failed with error: %v", err)
return "", err
}
return convertedResourceID, nil
}
// GetNodeNameByProviderID gets the node name by provider ID.
func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
// NodeName is not part of providerID for vmss instances.
scaleSetName, err := extractScaleSetNameByProviderID(providerID)
if err != nil {
klog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
}
resourceGroup, err := extractResourceGroupByProviderID(providerID)
if err != nil {
return "", fmt.Errorf("error of extracting resource group for node %q", providerID)
}
instanceID, err := getLastSegment(providerID)
if err != nil {
klog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
}
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID)
if err != nil {
return "", err
}
if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
nodeName := strings.ToLower(*vm.OsProfile.ComputerName)
return types.NodeName(nodeName), nil
}
return "", nil
}
// GetInstanceTypeByNodeName gets the instance type by node name.
func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return "", err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.GetInstanceTypeByNodeName(name)
}
_, _, vm, err := ss.getVmssVM(name)
if err != nil {
return "", err
}
if vm.Sku != nil && vm.Sku.Name != nil {
return *vm.Sku.Name, nil
}
return "", nil
}
// GetZoneByNodeName gets the availability zone for the specified node. If the node
// is not running in an availability zone, it returns the fault domain instead.
func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return cloudprovider.Zone{}, err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.GetZoneByNodeName(name)
}
_, _, vm, err := ss.getVmssVM(name)
if err != nil {
return cloudprovider.Zone{}, err
}
var failureDomain string
if vm.Zones != nil && len(*vm.Zones) > 0 {
// Get availability zone for the node.
zones := *vm.Zones
zoneID, err := strconv.Atoi(zones[0])
if err != nil {
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %v", zones, err)
}
failureDomain = ss.makeZone(zoneID)
} else if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
// Availability zone is not used for the node, falling back to fault domain.
failureDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain))
}
return cloudprovider.Zone{
FailureDomain: failureDomain,
Region: *vm.Location,
}, nil
}
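// Illustrative sketch (not part of the original file), under the assumption that
// makeZone produces region-qualified zone names: a zoned VM in "eastus2" with
// Zones ["1"] would report FailureDomain "eastus2-1", while an unzoned VM falls
// back to its platform fault domain number, e.g. "0".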
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
func (ss *scaleSet) GetPrimaryVMSetName() string {
return ss.Config.PrimaryScaleSetName
}
// GetIPByNodeName gets machine private IP and public IP by node name.
func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
nic, err := ss.GetPrimaryInterface(nodeName)
if err != nil {
klog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
return "", "", err
}
ipConfig, err := getPrimaryIPConfig(nic)
if err != nil {
klog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
return "", "", err
}
internalIP := *ipConfig.PrivateIPAddress
publicIP := ""
if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
pipID := *ipConfig.PublicIPAddress.ID
pipName, err := getLastSegment(pipID)
if err != nil {
return "", "", fmt.Errorf("failed to get publicIP name for node %q with pipID %q", nodeName, pipID)
}
resourceGroup, err := ss.GetNodeResourceGroup(nodeName)
if err != nil {
return "", "", err
}
pip, existsPip, err := ss.getPublicIPAddress(resourceGroup, pipName)
if err != nil {
return "", "", err
}
if existsPip {
publicIP = *pip.IPAddress
}
}
return internalIP, publicIP, nil
}
// This returns the full identifier of the primary NIC for the given VM.
func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) {
if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
}
for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
if *ref.Primary {
return *ref.ID, nil
}
}
return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
}
// getVmssMachineID returns the full identifier of a vmss virtual machine.
func (az *Cloud) getVmssMachineID(resourceGroup, scaleSetName, instanceID string) string {
return fmt.Sprintf(
vmssMachineIDTemplate,
az.SubscriptionID,
strings.ToLower(resourceGroup),
scaleSetName,
instanceID)
}
// machineName is composed of the computerNamePrefix and a base-36 instanceID.
// The instanceID part has a fixed length of 6 characters.
// Refer to https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/.
func getScaleSetVMInstanceID(machineName string) (string, error) {
nameLength := len(machineName)
if nameLength < 6 {
return "", ErrorNotVmssInstance
}
instanceID, err := strconv.ParseUint(machineName[nameLength-6:], 36, 64)
if err != nil {
return "", ErrorNotVmssInstance
}
return fmt.Sprintf("%d", instanceID), nil
}
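// Illustrative sketch (not part of the original file), using hypothetical node names:
//
//	id, err := getScaleSetVMInstanceID("k8s-agentpool1-12345678-000004")
//	// id == "4", err == nil: the trailing 6 characters are parsed as base 36
//	_, err = getScaleSetVMInstanceID("short")
//	// err == ErrorNotVmssInstance: the name is shorter than 6 characters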
// extractScaleSetNameByProviderID extracts the scaleset name by vmss node's ProviderID.
func extractScaleSetNameByProviderID(providerID string) (string, error) {
matches := scaleSetNameRE.FindStringSubmatch(providerID)
if len(matches) != 2 {
return "", ErrorNotVmssInstance
}
return matches[1], nil
}
// extractResourceGroupByProviderID extracts the resource group name by vmss node's ProviderID.
func extractResourceGroupByProviderID(providerID string) (string, error) {
matches := resourceGroupRE.FindStringSubmatch(providerID)
if len(matches) != 2 {
return "", ErrorNotVmssInstance
}
return matches[1], nil
}
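// Illustrative sketch (not part of the original file): for a hypothetical vmss
// provider ID such as
//
//	/subscriptions/sub/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachineScaleSets/vmss1/virtualMachines/0
//
// extractScaleSetNameByProviderID returns "vmss1" and extractResourceGroupByProviderID
// returns "rg1". A standalone-VM provider ID matches neither regular expression, so
// both functions return ErrorNotVmssInstance and callers fall back to the
// availability set implementation.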
// listScaleSets lists all scale sets.
func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) {
var err error
ctx, cancel := getContextWithCancel()
defer cancel()
allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup)
if err != nil {
klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err)
return nil, err
}
ssNames := make([]string, len(allScaleSets))
for i := range allScaleSets {
ssNames[i] = *(allScaleSets[i].Name)
}
return ssNames, nil
}
// listScaleSetVMs lists VMs belonging to the specified scale set.
func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compute.VirtualMachineScaleSetVM, error) {
var err error
ctx, cancel := getContextWithCancel()
defer cancel()
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, "", "", string(compute.InstanceView))
if err != nil {
klog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
return nil, err
}
return allVMs, nil
}
// getAgentPoolScaleSets lists the virtual machines for the resource group and then builds
// a list of scale sets that match the nodes available to k8s.
func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
agentPoolScaleSets := &[]string{}
for nx := range nodes {
if isMasterNode(nodes[nx]) {
continue
}
if ss.ShouldNodeExcludedFromLoadBalancer(nodes[nx]) {
continue
}
nodeName := nodes[nx].Name
ssName, err := ss.getScaleSetNameByNodeName(nodeName)
if err != nil {
return nil, err
}
if ssName == "" {
klog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName)
continue
}
*agentPoolScaleSets = append(*agentPoolScaleSets, ssName)
}
return agentPoolScaleSets, nil
}
// GetVMSetNames selects all possible availability sets or scale sets
// (depending on the configured vmType) for the service load balancer. If the
// service has no load balancer mode annotation, it returns the primary VMSet;
// otherwise it returns the eligible VMSets named by the annotation.
func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetNames *[]string, err error) {
hasMode, isAuto, serviceVMSetNames := getServiceLoadBalancerMode(service)
if !hasMode {
// no mode specified in service annotation default to PrimaryScaleSetName.
scaleSetNames := &[]string{ss.Config.PrimaryScaleSetName}
return scaleSetNames, nil
}
scaleSetNames, err := ss.getAgentPoolScaleSets(nodes)
if err != nil {
klog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err)
return nil, err
}
if len(*scaleSetNames) == 0 {
klog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes))
}
// sort the list to have deterministic selection
sort.Strings(*scaleSetNames)
if !isAuto {
if serviceVMSetNames == nil || len(serviceVMSetNames) == 0 {
return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
}
// validate scale set exists
var found bool
for sasx := range serviceVMSetNames {
for asx := range *scaleSetNames {
if strings.EqualFold((*scaleSetNames)[asx], serviceVMSetNames[sasx]) {
found = true
serviceVMSetNames[sasx] = (*scaleSetNames)[asx]
break
}
}
if !found {
klog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx])
return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetNames[sasx])
}
}
vmSetNames = &serviceVMSetNames
}
return vmSetNames, nil
}
// extractResourceGroupByVMSSNicID extracts the resource group name by vmss nicID.
func extractResourceGroupByVMSSNicID(nicID string) (string, error) {
matches := vmssNicResourceGroupRE.FindStringSubmatch(nicID)
if len(matches) != 2 {
return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
}
return matches[1], nil
}
// GetPrimaryInterface gets the machine's primary network interface by node name.
func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return network.Interface{}, err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.GetPrimaryInterface(nodeName)
}
ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
if err != nil {
// The VM is managed by an availability set but is not yet cached in availabilitySetNodesCache.
if err == ErrorNotVmssInstance {
return ss.availabilitySet.GetPrimaryInterface(nodeName)
}
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err)
return network.Interface{}, err
}
primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
if err != nil {
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
return network.Interface{}, err
}
nicName, err := getLastSegment(primaryInterfaceID)
if err != nil {
klog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
return network.Interface{}, err
}
resourceGroup, err := extractResourceGroupByVMSSNicID(primaryInterfaceID)
if err != nil {
return network.Interface{}, err
}
ctx, cancel := getContextWithCancel()
defer cancel()
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroup, ssName, instanceID, nicName, "")
if err != nil {
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, ssName, nicName, err)
return network.Interface{}, err
}
// Fix interface's location, which is required when updating the interface.
// TODO: is this a bug of azure SDK?
if nic.Location == nil || *nic.Location == "" {
nic.Location = vm.Location
}
return nic, nil
}
// getPrimarynetworkInterfaceConfiguration gets primary network interface configuration for scale set virtual machine.
func (ss *scaleSet) getPrimarynetworkInterfaceConfiguration(networkConfigurations []compute.VirtualMachineScaleSetNetworkConfiguration, nodeName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) {
if len(networkConfigurations) == 1 {
return &networkConfigurations[0], nil
}
for idx := range networkConfigurations {
networkConfig := &networkConfigurations[idx]
if networkConfig.Primary != nil && *networkConfig.Primary == true {
return networkConfig, nil
}
}
return nil, fmt.Errorf("failed to find a primary network configuration for the scale set VM %q", nodeName)
}
func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachineScaleSetNetworkConfiguration, nodeName string) (*compute.VirtualMachineScaleSetIPConfiguration, error) {
ipConfigurations := *config.IPConfigurations
if len(ipConfigurations) == 1 {
return &ipConfigurations[0], nil
}
for idx := range ipConfigurations {
ipConfig := &ipConfigurations[idx]
if ipConfig.Primary != nil && *ipConfig.Primary == true {
return ipConfig, nil
}
}
return nil, fmt.Errorf("failed to find a primary IP configuration for the scale set VM %q", nodeName)
}
// EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool.
func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
klog.V(3).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vmSetName, backendPoolID)
vmName := mapNodeNameToVMName(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName)
if err != nil {
return err
}
// Check scale set name:
// - For basic SKU load balancer, errNotInVMSet should be returned if the node's
// scale set is mismatched with vmSetName.
// - For standard SKU load balancer, backend could belong to multiple VMSS, so we
// don't check vmSet for it.
if vmSetName != "" && !ss.useStandardLoadBalancer() && !strings.EqualFold(vmSetName, ssName) {
klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the scaleSet %s", vmName, vmSetName)
return nil
}
// Find primary network interface configuration.
networkInterfaceConfigurations := *vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations
primaryNetworkInterfaceConfiguration, err := ss.getPrimarynetworkInterfaceConfiguration(networkInterfaceConfigurations, vmName)
if err != nil {
return err
}
// Find primary IP configuration.
primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkInterfaceConfiguration, vmName)
if err != nil {
return err
}
// Update primary IP configuration's LoadBalancerBackendAddressPools.
foundPool := false
newBackendPools := []compute.SubResource{}
if primaryIPConfiguration.LoadBalancerBackendAddressPools != nil {
newBackendPools = *primaryIPConfiguration.LoadBalancerBackendAddressPools
}
for _, existingPool := range newBackendPools {
if strings.EqualFold(backendPoolID, *existingPool.ID) {
foundPool = true
break
}
}
// The backendPoolID has already been found from existing LoadBalancerBackendAddressPools.
if foundPool {
return nil
}
if ss.useStandardLoadBalancer() && len(newBackendPools) > 0 {
// Although standard load balancer supports backends from multiple scale
// sets, the same network interface couldn't be added to more than one load balancer of
// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
// about this.
newBackendPoolsIDs := make([]string, 0, len(newBackendPools))
for _, pool := range newBackendPools {
if pool.ID != nil {
newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID)
}
}
isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs)
if err != nil {
return err
}
if !isSameLB {
klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName)
return nil
}
}
// Compose a new vmssVM with added backendPoolID.
newBackendPools = append(newBackendPools,
compute.SubResource{
ID: to.StringPtr(backendPoolID),
})
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
newVM := compute.VirtualMachineScaleSetVM{
Sku: vm.Sku,
Location: vm.Location,
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
HardwareProfile: vm.HardwareProfile,
NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{
NetworkInterfaceConfigurations: &networkInterfaceConfigurations,
},
},
}
// Get the node resource group.
nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
if err != nil {
return err
}
// Invalidate the cache since we would update it.
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
// Update vmssVM with backoff.
ctx, cancel := getContextWithCancel()
defer cancel()
klog.V(2).Infof("EnsureHostInPool begins to update vmssVM(%s) with new backendPoolID %s", vmName, backendPoolID)
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
klog.V(2).Infof("EnsureHostInPool update backing off vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err)
retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM)
if retryErr != nil {
err = retryErr
klog.Errorf("EnsureHostInPool update abort backoff vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err)
}
}
return err
}
// EnsureHostsInPool ensures the given nodes' primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
hostUpdates := make([]func() error, 0, len(nodes))
for _, node := range nodes {
localNodeName := node.Name
if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
continue
}
if ss.ShouldNodeExcludedFromLoadBalancer(node) {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
continue
}
f := func() error {
// VMAS nodes should also be added to the SLB backends.
if ss.useStandardLoadBalancer() {
// Check whether the node is a VMAS virtual machine.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName)
if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err)
return err
}
if managedByAS {
return ss.availabilitySet.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
}
}
err := ss.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
if err != nil {
return fmt.Errorf("EnsureHostInPool(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err)
}
return nil
}
hostUpdates = append(hostUpdates, f)
}
errs := utilerrors.AggregateGoroutines(hostUpdates...)
if errs != nil {
return utilerrors.Flatten(errs)
}
return nil
}
// ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted from the specified node.
func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeName, backendPoolID string) error {
ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
if err != nil {
return err
}
// Find primary network interface configuration.
networkInterfaceConfigurations := *vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations
primaryNetworkInterfaceConfiguration, err := ss.getPrimarynetworkInterfaceConfiguration(networkInterfaceConfigurations, nodeName)
if err != nil {
return err
}
// Find primary IP configuration.
primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkInterfaceConfiguration, nodeName)
if err != nil {
return err
}
if primaryIPConfiguration.LoadBalancerBackendAddressPools == nil || len(*primaryIPConfiguration.LoadBalancerBackendAddressPools) == 0 {
return nil
}
// Construct new loadBalancerBackendAddressPools and remove backendAddressPools from primary IP configuration.
existingBackendPools := *primaryIPConfiguration.LoadBalancerBackendAddressPools
newBackendPools := []compute.SubResource{}
foundPool := false
for i := len(existingBackendPools) - 1; i >= 0; i-- {
curPool := existingBackendPools[i]
if strings.EqualFold(backendPoolID, *curPool.ID) {
klog.V(10).Infof("ensureBackendPoolDeletedFromNode gets unwanted backend pool %q for node %s", backendPoolID, nodeName)
foundPool = true
newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
}
}
// Pool not found; assume it has already been removed.
if !foundPool {
return nil
}
// Compose a new vmssVM with the backendPoolID removed.
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
newVM := compute.VirtualMachineScaleSetVM{
Sku: vm.Sku,
Location: vm.Location,
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
HardwareProfile: vm.HardwareProfile,
NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{
NetworkInterfaceConfigurations: &networkInterfaceConfigurations,
},
},
}
// Get the node resource group.
nodeResourceGroup, err := ss.GetNodeResourceGroup(nodeName)
if err != nil {
return err
}
// Invalidate the cache since we would update it.
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
// Update vmssVM with backoff.
ctx, cancel := getContextWithCancel()
defer cancel()
klog.V(2).Infof("ensureBackendPoolDeletedFromNode begins to update vmssVM(%s) with backendPoolID %s", nodeName, backendPoolID)
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
klog.V(2).Infof("ensureBackendPoolDeletedFromNode update backing off vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err)
retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM)
if retryErr != nil {
err = retryErr
klog.Errorf("ensureBackendPoolDeletedFromNode update abort backoff vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err)
}
}
if err != nil {
klog.Errorf("ensureBackendPoolDeletedFromNode failed to update vmssVM(%s) with backendPoolID %s: %v", nodeName, backendPoolID, err)
} else {
klog.V(2).Infof("ensureBackendPoolDeletedFromNode update vmssVM(%s) with backendPoolID %s succeeded", nodeName, backendPoolID)
}
return err
}
// getNodeNameByIPConfigurationID gets the node name by IP configuration ID.
func (ss *scaleSet) getNodeNameByIPConfigurationID(ipConfigurationID string) (string, error) {
matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID)
if len(matches) != 4 {
klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is mananaged by availability set", ipConfigurationID)
return "", ErrorNotVmssInstance
}
resourceGroup := matches[1]
scaleSetName := matches[2]
instanceID := matches[3]
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID)
if err != nil {
return "", err
}
if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
return strings.ToLower(*vm.OsProfile.ComputerName), nil
}
return "", nil
}
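// Illustrative sketch (not part of the original file): for a hypothetical IP
// configuration ID such as
//
//	/subscriptions/sub/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachineScaleSets/vmss1/virtualMachines/3/networkInterfaces/nic0/ipConfigurations/ipconfig1
//
// vmssIPConfigurationRE captures "rg1", "vmss1" and "3"; the VM is then looked up by
// instance ID and its lowercased OS computer name is returned as the node name.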
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
// Returns nil if backend address pools already deleted.
if backendAddressPools == nil {
return nil
}
ipConfigurationIDs := []string{}
for _, backendPool := range *backendAddressPools {
if strings.EqualFold(*backendPool.ID, backendPoolID) && backendPool.BackendIPConfigurations != nil {
for _, ipConf := range *backendPool.BackendIPConfigurations {
if ipConf.ID == nil {
continue
}
ipConfigurationIDs = append(ipConfigurationIDs, *ipConf.ID)
}
}
}
hostUpdates := make([]func() error, 0, len(ipConfigurationIDs))
for i := range ipConfigurationIDs {
ipConfigurationID := ipConfigurationIDs[i]
f := func() error {
if scaleSetName, err := extractScaleSetNameByProviderID(ipConfigurationID); err == nil {
// Only remove nodes that belong to the specified vmSet from basic LB backends.
if !ss.useStandardLoadBalancer() && !strings.EqualFold(scaleSetName, vmSetName) {
return nil
}
}
nodeName, err := ss.getNodeNameByIPConfigurationID(ipConfigurationID)
if err != nil {
if err == ErrorNotVmssInstance { // Do nothing for the VMAS nodes.
return nil
}
klog.Errorf("Failed to getNodeNameByIPConfigurationID(%s): %v", ipConfigurationID, err)
return err
}
err = ss.ensureBackendPoolDeletedFromNode(service, nodeName, backendPoolID)
if err != nil {
return fmt.Errorf("failed to ensure backend pool %s deleted from node %s: %v", backendPoolID, nodeName, err)
}
return nil
}
hostUpdates = append(hostUpdates, f)
}
errs := utilerrors.AggregateGoroutines(hostUpdates...)
if errs != nil {
return utilerrors.Flatten(errs)
}
return nil
}

View File

@@ -1,264 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"strings"
"time"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/sets"
)
var (
vmssNameSeparator = "_"
vmssCacheSeparator = "#"
nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey"
availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"
vmssCacheTTL = time.Minute
vmssVMCacheTTL = time.Minute
availabilitySetNodesCacheTTL = 5 * time.Minute
nodeNameToScaleSetMappingCacheTTL = 5 * time.Minute
)
// nodeNameToScaleSetMapping maps nodeName to scaleSet name.
// The map is required because vmss nodeName is not equal to its vmName.
type nodeNameToScaleSetMapping map[string]string
func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
return fmt.Sprintf("%s%s%s", scaleSetName, vmssNameSeparator, instanceID)
}
func extractVmssVMName(name string) (string, string, error) {
split := strings.SplitAfter(name, vmssNameSeparator)
if len(split) < 2 {
klog.V(3).Infof("Failed to extract vmssVMName %q", name)
return "", "", ErrorNotVmssInstance
}
ssName := strings.Join(split[0:len(split)-1], "")
// removing the trailing `vmssNameSeparator` since we used SplitAfter
ssName = ssName[:len(ssName)-1]
instanceID := split[len(split)-1]
return ssName, instanceID, nil
}
// vmssCache only holds vmss from ss.ResourceGroup because nodes from other resourceGroups
// will be excluded from LB backends.
func (ss *scaleSet) newVmssCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message)
return nil, nil
}
return &result, nil
}
return newTimedcache(vmssCacheTTL, getter)
}
func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
localCache := make(nodeNameToScaleSetMapping)
allResourceGroups, err := ss.GetResourceGroups()
if err != nil {
return nil, err
}
for _, resourceGroup := range allResourceGroups.List() {
scaleSetNames, err := ss.listScaleSets(resourceGroup)
if err != nil {
return nil, err
}
for _, ssName := range scaleSetNames {
vms, err := ss.listScaleSetVMs(ssName, resourceGroup)
if err != nil {
return nil, err
}
for _, vm := range vms {
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
klog.Warningf("failed to get computerName for vmssVM (%q)", ssName)
continue
}
computerName := strings.ToLower(*vm.OsProfile.ComputerName)
localCache[computerName] = ssName
}
}
}
return localCache, nil
}
return newTimedcache(nodeNameToScaleSetMappingCacheTTL, getter)
}
func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
localCache := sets.NewString()
resourceGroups, err := ss.GetResourceGroups()
if err != nil {
return nil, err
}
for _, resourceGroup := range resourceGroups.List() {
vmList, err := ss.Cloud.ListVirtualMachines(resourceGroup)
if err != nil {
return nil, err
}
for _, vm := range vmList {
if vm.Name != nil {
localCache.Insert(*vm.Name)
}
}
}
return localCache, nil
}
return newTimedcache(availabilitySetNodesCacheTTL, getter)
}
func buildVmssCacheKey(resourceGroup, name string) string {
// key is composed of <resourceGroup>#<vmName>
return fmt.Sprintf("%s%s%s", strings.ToLower(resourceGroup), vmssCacheSeparator, name)
}
func extractVmssCacheKey(key string) (string, string, error) {
// key is composed of <resourceGroup>#<vmName>
keyItems := strings.Split(key, vmssCacheSeparator)
if len(keyItems) != 2 {
return "", "", fmt.Errorf("key %q is not in format '<resourceGroup>#<vmName>'", key)
}
resourceGroup := keyItems[0]
vmName := keyItems[1]
return resourceGroup, vmName, nil
}
func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
// key is composed of <resourceGroup>#<vmName>
resourceGroup, vmName, err := extractVmssCacheKey(key)
if err != nil {
return nil, err
}
// vmName's format is 'scaleSetName_instanceID'
ssName, instanceID, err := extractVmssVMName(vmName)
if err != nil {
return nil, err
}
// Not found, the VM doesn't belong to any known scale sets.
if ssName == "" {
return nil, nil
}
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, resourceGroup, ssName, instanceID)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
return nil, nil
}
// Get instanceView for vmssVM.
if result.InstanceView == nil {
viewCtx, viewCancel := getContextWithCancel()
defer viewCancel()
view, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(viewCtx, resourceGroup, ssName, instanceID)
// It is possible that the vmssVM gets removed just before this call. So check again whether the VM exists.
exists, message, realErr = checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
return nil, nil
}
result.InstanceView = &view
}
return &result, nil
}
return newTimedcache(vmssVMCacheTTL, getter)
}
func (ss *scaleSet) getScaleSetNameByNodeName(nodeName string) (string, error) {
getScaleSetName := func(nodeName string) (string, error) {
nodeNameMapping, err := ss.nodeNameToScaleSetMappingCache.Get(nodeNameToScaleSetMappingKey)
if err != nil {
return "", err
}
realMapping := nodeNameMapping.(nodeNameToScaleSetMapping)
if ssName, ok := realMapping[nodeName]; ok {
return ssName, nil
}
return "", nil
}
ssName, err := getScaleSetName(nodeName)
if err != nil {
return "", err
}
if ssName != "" {
return ssName, nil
}
// ssName is still not found; it is likely that new nodes have been created.
// Force refresh the cache and try again.
ss.nodeNameToScaleSetMappingCache.Delete(nodeNameToScaleSetMappingKey)
return getScaleSetName(nodeName)
}
func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) {
cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey)
if err != nil {
return false, err
}
availabilitySetNodes := cached.(sets.String)
return availabilitySetNodes.Has(nodeName), nil
}
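The caches above lean on two string conventions: a vmss VM name of the form <scaleSetName>_<instanceID> (makeVmssVMName/extractVmssVMName) and a vmssVM cache key of the form <resourceGroup>#<vmName> (buildVmssCacheKey/extractVmssCacheKey). A minimal sketch of the round trip, written as a test and assuming it sits in this package so it can reach the unexported helpers:

package azure

import "testing"

// TestVmssNameAndCacheKeyRoundTrip is an illustrative sketch of the naming conventions
// used by the vmss caches; the scale set name and instance ID are placeholders.
func TestVmssNameAndCacheKeyRoundTrip(t *testing.T) {
	// "ss_3" means instance "3" of scale set "ss".
	ssName, instanceID, err := extractVmssVMName("ss_3")
	if err != nil || ssName != "ss" || instanceID != "3" {
		t.Fatalf("unexpected result: %q %q %v", ssName, instanceID, err)
	}

	// Cache keys prepend the lower-cased resource group with a '#' separator.
	key := buildVmssCacheKey("RG", "ss_3")
	if key != "rg#ss_3" {
		t.Fatalf("unexpected key: %q", key)
	}
	rg, vmName, err := extractVmssCacheKey(key)
	if err != nil || rg != "rg" || vmName != "ss_3" {
		t.Fatalf("unexpected result: %q %q %v", rg, vmName, err)
	}
}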

View File

@@ -1,67 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestExtractVmssVMName(t *testing.T) {
cases := []struct {
description string
vmName string
expectError bool
expectedScaleSet string
expectedInstanceID string
}{
{
description: "wrong vmss VM name should report error",
vmName: "vm1234",
expectError: true,
},
{
description: "wrong VM name separator should report error",
vmName: "vm-1234",
expectError: true,
},
{
description: "correct vmss VM name should return correct scaleSet and instanceID",
vmName: "vm_1234",
expectedScaleSet: "vm",
expectedInstanceID: "1234",
},
{
description: "correct vmss VM name with Extra Separator should return correct scaleSet and instanceID",
vmName: "vm_test_1234",
expectedScaleSet: "vm_test",
expectedInstanceID: "1234",
},
}
for _, c := range cases {
ssName, instanceID, err := extractVmssVMName(c.vmName)
if c.expectError {
assert.Error(t, err, c.description)
continue
}
assert.Equal(t, c.expectedScaleSet, ssName, c.description)
assert.Equal(t, c.expectedInstanceID, instanceID, c.description)
}
}

View File

@@ -1,376 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
)
const (
fakePrivateIP = "10.240.0.10"
fakePublicIP = "10.10.10.10"
)
func newTestScaleSet(scaleSetName, zone string, faultDomain int32, vmList []string) (*scaleSet, error) {
cloud := getTestCloud()
setTestVirtualMachineCloud(cloud, scaleSetName, zone, faultDomain, vmList)
ss, err := newScaleSet(cloud)
if err != nil {
return nil, err
}
return ss.(*scaleSet), nil
}
func setTestVirtualMachineCloud(ss *Cloud, scaleSetName, zone string, faultDomain int32, vmList []string) {
virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient()
virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
publicIPAddressesClient := newFakeAzurePIPClient("rg")
interfaceClient := newFakeAzureInterfacesClient()
// set test scale sets.
scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet)
scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{
scaleSetName: {
Name: &scaleSetName,
},
}
virtualMachineScaleSetsClient.setFakeStore(scaleSets)
testInterfaces := map[string]map[string]network.Interface{
"rg": make(map[string]network.Interface),
}
testPIPs := map[string]map[string]network.PublicIPAddress{
"rg": make(map[string]network.PublicIPAddress),
}
ssVMs := map[string]map[string]compute.VirtualMachineScaleSetVM{
"rg": make(map[string]compute.VirtualMachineScaleSetVM),
}
for i := range vmList {
nodeName := vmList[i]
ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
interfaceID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d/networkInterfaces/%s", scaleSetName, i, nodeName)
publicAddressID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d/networkInterfaces/%s/ipConfigurations/ipconfig1/publicIPAddresses/%s", scaleSetName, i, nodeName, nodeName)
instanceID := fmt.Sprintf("%d", i)
vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID)
// set vmss virtual machine.
networkInterfaces := []compute.NetworkInterfaceReference{
{
ID: &interfaceID,
},
}
ipConfigurations := []compute.VirtualMachineScaleSetIPConfiguration{
{
Name: to.StringPtr("ipconfig1"),
VirtualMachineScaleSetIPConfigurationProperties: &compute.VirtualMachineScaleSetIPConfigurationProperties{},
},
}
networkConfigurations := []compute.VirtualMachineScaleSetNetworkConfiguration{
{
Name: to.StringPtr("ipconfig1"),
ID: to.StringPtr("fakeNetworkConfiguration"),
VirtualMachineScaleSetNetworkConfigurationProperties: &compute.VirtualMachineScaleSetNetworkConfigurationProperties{
IPConfigurations: &ipConfigurations,
},
},
}
vmssVM := compute.VirtualMachineScaleSetVM{
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
OsProfile: &compute.OSProfile{
ComputerName: &nodeName,
},
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &networkInterfaces,
},
NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{
NetworkInterfaceConfigurations: &networkConfigurations,
},
InstanceView: &compute.VirtualMachineScaleSetVMInstanceView{
PlatformFaultDomain: &faultDomain,
},
},
ID: &ID,
InstanceID: &instanceID,
Name: &vmName,
Location: &ss.Location,
}
if zone != "" {
zones := []string{zone}
vmssVM.Zones = &zones
}
ssVMs["rg"][vmName] = vmssVM
// set interfaces.
testInterfaces["rg"][nodeName] = network.Interface{
ID: &interfaceID,
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(true),
PrivateIPAddress: to.StringPtr(fakePrivateIP),
PublicIPAddress: &network.PublicIPAddress{
ID: to.StringPtr(publicAddressID),
},
},
},
},
},
}
// set public IPs.
testPIPs["rg"][nodeName] = network.PublicIPAddress{
ID: to.StringPtr(publicAddressID),
PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
IPAddress: to.StringPtr(fakePublicIP),
},
}
}
virtualMachineScaleSetVMsClient.setFakeStore(ssVMs)
interfaceClient.setFakeStore(testInterfaces)
publicIPAddressesClient.setFakeStore(testPIPs)
ss.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient
ss.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient
ss.InterfacesClient = interfaceClient
ss.PublicIPAddressesClient = publicIPAddressesClient
}
func TestGetScaleSetVMInstanceID(t *testing.T) {
tests := []struct {
msg string
machineName string
expectError bool
expectedInstanceID string
}{{
msg: "invalid vmss instance name",
machineName: "vmvm",
expectError: true,
},
{
msg: "valid vmss instance name",
machineName: "vm00000Z",
expectError: false,
expectedInstanceID: "35",
},
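// Note: "vm00000Z" is expected to map to "35" because the trailing characters of the
// machine name are apparently decoded as a base-36 instance ID (Z == 35).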
}
for i, test := range tests {
instanceID, err := getScaleSetVMInstanceID(test.machineName)
if test.expectError {
assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
} else {
assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
}
}
}
func TestGetInstanceIDByNodeName(t *testing.T) {
testCases := []struct {
description string
scaleSet string
vmList []string
nodeName string
expected string
expectError bool
}{
{
description: "scaleSet should get instance by node name",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "vmssee6c2000001",
expected: "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/1",
},
{
description: "scaleSet should get instance by node name with upper cases hostname",
scaleSet: "ss",
vmList: []string{"VMSSEE6C2000000", "VMSSEE6C2000001"},
nodeName: "vmssee6c2000000",
expected: "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0",
},
{
description: "scaleSet should not get instance for non-exist nodes",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "agente6c2000005",
expectError: true,
},
}
for _, test := range testCases {
ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList)
assert.NoError(t, err, test.description)
real, err := ss.GetInstanceIDByNodeName(test.nodeName)
if test.expectError {
assert.Error(t, err, test.description)
continue
}
assert.NoError(t, err, test.description)
assert.Equal(t, test.expected, real, test.description)
}
}
func TestGetZoneByNodeName(t *testing.T) {
testCases := []struct {
description string
scaleSet string
vmList []string
nodeName string
zone string
faultDomain int32
expected string
expectError bool
}{
{
description: "scaleSet should get faultDomain for non-zoned nodes",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "vmssee6c2000000",
faultDomain: 3,
expected: "3",
},
{
description: "scaleSet should get availability zone for zoned nodes",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "vmssee6c2000000",
zone: "2",
faultDomain: 3,
expected: "westus-2",
},
{
description: "scaleSet should return error for non-exist nodes",
scaleSet: "ss",
faultDomain: 3,
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "agente6c2000005",
expectError: true,
},
}
for _, test := range testCases {
ss, err := newTestScaleSet(test.scaleSet, test.zone, test.faultDomain, test.vmList)
assert.NoError(t, err, test.description)
real, err := ss.GetZoneByNodeName(test.nodeName)
if test.expectError {
assert.Error(t, err, test.description)
continue
}
assert.NoError(t, err, test.description)
assert.Equal(t, test.expected, real.FailureDomain, test.description)
}
}
func TestGetIPByNodeName(t *testing.T) {
testCases := []struct {
description string
scaleSet string
vmList []string
nodeName string
expected []string
expectError bool
}{
{
description: "GetIPByNodeName should get node's privateIP and publicIP",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "vmssee6c2000000",
expected: []string{fakePrivateIP, fakePublicIP},
},
{
description: "GetIPByNodeName should return error for non-exist nodes",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "agente6c2000005",
expectError: true,
},
}
for _, test := range testCases {
ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList)
assert.NoError(t, err, test.description)
privateIP, publicIP, err := ss.GetIPByNodeName(test.nodeName)
if test.expectError {
assert.Error(t, err, test.description)
continue
}
assert.NoError(t, err, test.description)
assert.Equal(t, test.expected, []string{privateIP, publicIP}, test.description)
}
}
func TestGetNodeNameByIPConfigurationID(t *testing.T) {
ipConfigurationIDTemplate := "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s/networkInterfaces/%s/ipConfigurations/ipconfig1"
testCases := []struct {
description string
scaleSet string
vmList []string
ipConfigurationID string
expected string
expectError bool
}{
{
description: "getNodeNameByIPConfigurationID should get node's Name when the node is existing",
scaleSet: "scaleset1",
ipConfigurationID: fmt.Sprintf(ipConfigurationIDTemplate, "scaleset1", "0", "scaleset1"),
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
expected: "vmssee6c2000000",
},
{
description: "getNodeNameByIPConfigurationID should return error for non-exist nodes",
scaleSet: "scaleset2",
ipConfigurationID: fmt.Sprintf(ipConfigurationIDTemplate, "scaleset2", "3", "scaleset1"),
vmList: []string{"vmssee6c2000002", "vmssee6c2000003"},
expectError: true,
},
{
description: "getNodeNameByIPConfigurationID should return error for wrong ipConfigurationID",
scaleSet: "scaleset3",
ipConfigurationID: "invalid-configuration-id",
vmList: []string{"vmssee6c2000004", "vmssee6c2000005"},
expectError: true,
},
}
for _, test := range testCases {
ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList)
assert.NoError(t, err, test.description)
nodeName, err := ss.getNodeNameByIPConfigurationID(test.ipConfigurationID)
if test.expectError {
assert.Error(t, err, test.description)
continue
}
assert.NoError(t, err, test.description)
assert.Equal(t, test.expected, nodeName, test.description)
}
}

View File

@@ -1,364 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"net/http"
"regexp"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest"
"k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
var (
vmCacheTTL = time.Minute
lbCacheTTL = 2 * time.Minute
nsgCacheTTL = 2 * time.Minute
rtCacheTTL = 2 * time.Minute
azureNodeProviderIDRE = regexp.MustCompile(`^azure:///subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/(?:.*)`)
azureResourceGroupNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/(?:.*)`)
)
// checkResourceExistsFromError inspects an error and returns true if err is nil,
// false if the error is an autorest.DetailedError with StatusCode=404, and returns the
// error itself if it carries another status code or is another type of error.
func checkResourceExistsFromError(err error) (bool, string, error) {
if err == nil {
return true, "", nil
}
v, ok := err.(autorest.DetailedError)
if !ok {
return false, "", err
}
if v.StatusCode == http.StatusNotFound {
return false, err.Error(), nil
}
return false, "", v
}
// ignoreStatusNotFoundFromError returns nil if the error is StatusNotFound;
// otherwise it returns the error unchanged.
func ignoreStatusNotFoundFromError(err error) error {
if err == nil {
return nil
}
v, ok := err.(autorest.DetailedError)
if ok && v.StatusCode == http.StatusNotFound {
return nil
}
return err
}
// ignoreStatusForbiddenFromError returns nil if the status code is StatusForbidden.
// This happens when AuthorizationFailed is reported from Azure API.
func ignoreStatusForbiddenFromError(err error) error {
if err == nil {
return nil
}
v, ok := err.(autorest.DetailedError)
if ok && v.StatusCode == http.StatusForbidden {
return nil
}
return err
}
// getVirtualMachine calls 'VirtualMachinesClient.Get' through a timed cache.
// The service side throttles and delays responses when a given VM resource
// receives multiple requests within a short period.
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, err error) {
vmName := string(nodeName)
cachedVM, err := az.vmCache.Get(vmName)
if err != nil {
return vm, err
}
if cachedVM == nil {
return vm, cloudprovider.InstanceNotFound
}
return *(cachedVM.(*compute.VirtualMachine)), nil
}
func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) {
cachedRt, err := az.rtCache.Get(az.RouteTableName)
if err != nil {
return routeTable, false, err
}
if cachedRt == nil {
return routeTable, false, nil
}
return *(cachedRt.(*network.RouteTable)), true, nil
}
func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pip network.PublicIPAddress, exists bool, err error) {
resourceGroup := az.ResourceGroup
if pipResourceGroup != "" {
resourceGroup = pipResourceGroup
}
var realErr error
var message string
ctx, cancel := getContextWithCancel()
defer cancel()
pip, err = az.PublicIPAddressesClient.Get(ctx, resourceGroup, pipName, "")
exists, message, realErr = checkResourceExistsFromError(err)
if realErr != nil {
return pip, false, realErr
}
if !exists {
klog.V(2).Infof("Public IP %q not found with message: %q", pipName, message)
return pip, false, nil
}
return pip, exists, err
}
func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet network.Subnet, exists bool, err error) {
var realErr error
var message string
var rg string
if len(az.VnetResourceGroup) > 0 {
rg = az.VnetResourceGroup
} else {
rg = az.ResourceGroup
}
ctx, cancel := getContextWithCancel()
defer cancel()
subnet, err = az.SubnetsClient.Get(ctx, rg, virtualNetworkName, subnetName, "")
exists, message, realErr = checkResourceExistsFromError(err)
if realErr != nil {
return subnet, false, realErr
}
if !exists {
klog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message)
return subnet, false, nil
}
return subnet, exists, err
}
func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) {
cachedLB, err := az.lbCache.Get(name)
if err != nil {
return lb, false, err
}
if cachedLB == nil {
return lb, false, nil
}
return *(cachedLB.(*network.LoadBalancer)), true, nil
}
func (az *Cloud) getSecurityGroup() (nsg network.SecurityGroup, err error) {
if az.SecurityGroupName == "" {
return nsg, fmt.Errorf("securityGroupName is not configured")
}
securityGroup, err := az.nsgCache.Get(az.SecurityGroupName)
if err != nil {
return nsg, err
}
if securityGroup == nil {
return nsg, fmt.Errorf("nsg %q not found", az.SecurityGroupName)
}
return *(securityGroup.(*network.SecurityGroup)), nil
}
func (az *Cloud) newVMCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
// Currently InstanceView requests are used by azure_zones, and those calls come after non-InstanceView
// requests. If we first send an InstanceView request and then a non-InstanceView request, the second
// request will still hit throttling. This is what happens today for the cloud controller manager: in this
// case we fetch the instance view every time to fulfill the azure_zones requirement without hitting
// throttling.
// Consider adding a separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
ctx, cancel := getContextWithCancel()
defer cancel()
resourceGroup, err := az.GetNodeResourceGroup(key)
if err != nil {
return nil, err
}
vm, err := az.VirtualMachinesClient.Get(ctx, resourceGroup, key, compute.InstanceView)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Virtual machine %q not found with message: %q", key, message)
return nil, nil
}
return &vm, nil
}
return newTimedcache(vmCacheTTL, getter)
}
func (az *Cloud) newLBCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
lb, err := az.LoadBalancerClient.Get(ctx, az.ResourceGroup, key, "")
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Load balancer %q not found with message: %q", key, message)
return nil, nil
}
return &lb, nil
}
return newTimedcache(lbCacheTTL, getter)
}
func (az *Cloud) newNSGCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
nsg, err := az.SecurityGroupsClient.Get(ctx, az.ResourceGroup, key, "")
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Security group %q not found with message: %q", key, message)
return nil, nil
}
return &nsg, nil
}
return newTimedcache(nsgCacheTTL, getter)
}
func (az *Cloud) newRouteTableCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
rt, err := az.RouteTablesClient.Get(ctx, az.RouteTableResourceGroup, key, "")
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Route table %q not found with message: %q", key, message)
return nil, nil
}
return &rt, nil
}
return newTimedcache(rtCacheTTL, getter)
}
func (az *Cloud) useStandardLoadBalancer() bool {
return strings.EqualFold(az.LoadBalancerSku, loadBalancerSkuStandard)
}
func (az *Cloud) excludeMasterNodesFromStandardLB() bool {
return az.ExcludeMasterFromStandardLB != nil && *az.ExcludeMasterFromStandardLB
}
func (az *Cloud) disableLoadBalancerOutboundSNAT() bool {
if !az.useStandardLoadBalancer() || az.DisableOutboundSNAT == nil {
return false
}
return *az.DisableOutboundSNAT
}
// IsNodeUnmanaged returns true if the node is not managed by Azure cloud provider.
// Such nodes include on-prem machines and VMs from other clouds. They will not be added to load balancer
// backends. Azure routes and managed disks are also not supported for them.
func (az *Cloud) IsNodeUnmanaged(nodeName string) (bool, error) {
unmanagedNodes, err := az.GetUnmanagedNodes()
if err != nil {
return false, err
}
return unmanagedNodes.Has(nodeName), nil
}
// IsNodeUnmanagedByProviderID returns true if the node is not managed by Azure cloud provider.
// All managed node's providerIDs are in format 'azure:///subscriptions/<id>/resourceGroups/<rg>/providers/Microsoft.Compute/.*'
func (az *Cloud) IsNodeUnmanagedByProviderID(providerID string) bool {
return !azureNodeProviderIDRE.Match([]byte(providerID))
}
// convertResourceGroupNameToLower converts the resource group name in the resource ID to lower case.
func convertResourceGroupNameToLower(resourceID string) (string, error) {
matches := azureResourceGroupNameRE.FindStringSubmatch(resourceID)
if len(matches) != 2 {
return "", fmt.Errorf("%q isn't in Azure resource ID format %q", resourceID, azureResourceGroupNameRE.String())
}
resourceGroup := matches[1]
return strings.Replace(resourceID, resourceGroup, strings.ToLower(resourceGroup), 1), nil
}
// isBackendPoolOnSameLB checks whether newBackendPoolID is on the same load balancer as existingBackendPools.
// Since both public and internal LBs are supported, lbName and lbName-internal are treated as the same.
// If they are not the same, the lbName of the existing backend pools is also returned.
func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []string) (bool, string, error) {
matches := backendPoolIDRE.FindStringSubmatch(newBackendPoolID)
if len(matches) != 2 {
return false, "", fmt.Errorf("new backendPoolID %q is in wrong format", newBackendPoolID)
}
newLBName := matches[1]
// Use TrimSuffix so only an exact internal-LB suffix is stripped, rather than a trailing character set.
newLBNameTrimmed := strings.TrimSuffix(newLBName, InternalLoadBalancerNameSuffix)
for _, backendPool := range existingBackendPools {
matches := backendPoolIDRE.FindStringSubmatch(backendPool)
if len(matches) != 2 {
return false, "", fmt.Errorf("existing backendPoolID %q is in wrong format", backendPool)
}
lbName := matches[1]
if !strings.EqualFold(strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix), newLBNameTrimmed) {
return false, lbName, nil
}
}
return true, "", nil
}
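All of the newXxxCache constructors above share one getter contract: return (nil, nil) when Azure reports 404 so the miss itself is cached, return the error for anything else, and let callers such as getVirtualMachine translate a cached nil into their own not-found error. A small self-contained sketch of that contract, with a map standing in for the Azure client and a plain error standing in for cloudprovider.InstanceNotFound:

package main

import (
	"errors"
	"fmt"
)

// errInstanceNotFound stands in for cloudprovider.InstanceNotFound.
var errInstanceNotFound = errors.New("instance not found")

// fakeVMs stands in for the Azure API; a missing key plays the role of an HTTP 404.
var fakeVMs = map[string]string{"node-0": "vm-node-0"}

// getter mirrors the cache getters above: a nil result with a nil error means "not found".
func getter(key string) (interface{}, error) {
	vm, ok := fakeVMs[key]
	if !ok {
		return nil, nil
	}
	return &vm, nil
}

// getVirtualMachine mirrors how the cloud provider maps a cached nil onto a typed error.
func getVirtualMachine(name string) (string, error) {
	cached, err := getter(name)
	if err != nil {
		return "", err
	}
	if cached == nil {
		return "", errInstanceNotFound
	}
	return *(cached.(*string)), nil
}

func main() {
	fmt.Println(getVirtualMachine("node-0")) // found: "vm-node-0" and a nil error
	fmt.Println(getVirtualMachine("node-9")) // missing: "" and errInstanceNotFound
}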

View File

@@ -1,275 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"net/http"
"reflect"
"testing"
"github.com/Azure/go-autorest/autorest"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/util/sets"
)
func TestExtractNotFound(t *testing.T) {
notFound := autorest.DetailedError{StatusCode: http.StatusNotFound}
otherHTTP := autorest.DetailedError{StatusCode: http.StatusForbidden}
otherErr := fmt.Errorf("other error")
tests := []struct {
err error
expectedErr error
exists bool
}{
{nil, nil, true},
{otherErr, otherErr, false},
{notFound, nil, false},
{otherHTTP, otherHTTP, false},
}
for _, test := range tests {
exists, _, err := checkResourceExistsFromError(test.err)
if test.exists != exists {
t.Errorf("expected: %v, saw: %v", test.exists, exists)
}
if !reflect.DeepEqual(test.expectedErr, err) {
t.Errorf("expected err: %v, saw: %v", test.expectedErr, err)
}
}
}
func TestIsNodeUnmanaged(t *testing.T) {
tests := []struct {
name string
unmanagedNodes sets.String
node string
expected bool
expectErr bool
}{
{
name: "unmanaged node should return true",
unmanagedNodes: sets.NewString("node1", "node2"),
node: "node1",
expected: true,
},
{
name: "managed node should return false",
unmanagedNodes: sets.NewString("node1", "node2"),
node: "node3",
expected: false,
},
{
name: "empty unmanagedNodes should return true",
unmanagedNodes: sets.NewString(),
node: "node3",
expected: false,
},
{
name: "no synced informer should report error",
unmanagedNodes: sets.NewString(),
node: "node1",
expectErr: true,
},
}
az := getTestCloud()
for _, test := range tests {
az.unmanagedNodes = test.unmanagedNodes
if test.expectErr {
az.nodeInformerSynced = func() bool {
return false
}
}
real, err := az.IsNodeUnmanaged(test.node)
if test.expectErr {
assert.Error(t, err, test.name)
continue
}
assert.NoError(t, err, test.name)
assert.Equal(t, test.expected, real, test.name)
}
}
func TestIsNodeUnmanagedByProviderID(t *testing.T) {
tests := []struct {
providerID string
expected bool
name string
}{
{
providerID: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
expected: false,
},
{
providerID: CloudProviderName + "://",
expected: true,
},
{
providerID: ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
expected: true,
},
{
providerID: "aws:///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
expected: true,
},
{
providerID: "k8s-agent-AAAAAAAA-0",
expected: true,
},
}
az := getTestCloud()
for _, test := range tests {
isUnmanagedNode := az.IsNodeUnmanagedByProviderID(test.providerID)
assert.Equal(t, test.expected, isUnmanagedNode, test.providerID)
}
}
func TestConvertResourceGroupNameToLower(t *testing.T) {
tests := []struct {
desc string
resourceID string
expected string
expectError bool
}{
{
desc: "empty string should report error",
resourceID: "",
expectError: true,
},
{
desc: "resourceID not in Azure format should report error",
resourceID: "invalid-id",
expectError: true,
},
{
desc: "providerID not in Azure format should report error",
resourceID: "azure://invalid-id",
expectError: true,
},
{
desc: "resource group name in VM providerID should be converted",
resourceID: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
expected: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroupname/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
},
{
desc: "resource group name in VM resourceID should be converted",
resourceID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
expected: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroupname/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
},
{
desc: "resource group name in VMSS providerID should be converted",
resourceID: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachineScaleSets/myScaleSetName/virtualMachines/156",
expected: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroupname/providers/Microsoft.Compute/virtualMachineScaleSets/myScaleSetName/virtualMachines/156",
},
{
desc: "resource group name in VMSS resourceID should be converted",
resourceID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachineScaleSets/myScaleSetName/virtualMachines/156",
expected: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroupname/providers/Microsoft.Compute/virtualMachineScaleSets/myScaleSetName/virtualMachines/156",
},
}
for _, test := range tests {
real, err := convertResourceGroupNameToLower(test.resourceID)
if test.expectError {
assert.NotNil(t, err, test.desc)
continue
}
assert.Nil(t, err, test.desc)
assert.Equal(t, test.expected, real, test.desc)
}
}
func TestIsBackendPoolOnSameLB(t *testing.T) {
tests := []struct {
backendPoolID string
existingBackendPools []string
expected bool
expectedLBName string
expectError bool
}{
{
backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
existingBackendPools: []string{
"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2",
},
expected: true,
expectedLBName: "",
},
{
backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/pool1",
existingBackendPools: []string{
"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2",
},
expected: true,
expectedLBName: "",
},
{
backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
existingBackendPools: []string{
"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/pool2",
},
expected: true,
expectedLBName: "",
},
{
backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
existingBackendPools: []string{
"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb2/backendAddressPools/pool2",
},
expected: false,
expectedLBName: "lb2",
},
{
backendPoolID: "wrong-backendpool-id",
existingBackendPools: []string{
"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2",
},
expectError: true,
},
{
backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
existingBackendPools: []string{
"wrong-existing-backendpool-id",
},
expectError: true,
},
{
backendPoolID: "wrong-backendpool-id",
existingBackendPools: []string{
"wrong-existing-backendpool-id",
},
expectError: true,
},
}
for _, test := range tests {
isSameLB, lbName, err := isBackendPoolOnSameLB(test.backendPoolID, test.existingBackendPools)
if test.expectError {
assert.Error(t, err)
continue
}
assert.Equal(t, test.expected, isSameLB)
assert.Equal(t, test.expectedLBName, lbName)
}
}

View File

@@ -1,121 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
// makeZone returns the zone value in format of <region>-<zone-id>.
func (az *Cloud) makeZone(zoneID int) string {
return fmt.Sprintf("%s-%d", strings.ToLower(az.Location), zoneID)
}
// isAvailabilityZone returns true if the zone is in format of <region>-<zone-id>.
func (az *Cloud) isAvailabilityZone(zone string) bool {
return strings.HasPrefix(zone, fmt.Sprintf("%s-", az.Location))
}
// GetZoneID returns the ID of zone from node's zone label.
func (az *Cloud) GetZoneID(zoneLabel string) string {
if !az.isAvailabilityZone(zoneLabel) {
return ""
}
return strings.TrimPrefix(zoneLabel, fmt.Sprintf("%s-", az.Location))
}
// GetZone returns the Zone containing the current availability zone and locality region that the program is running in.
// If the node is not running with availability zones, then it will fall back to fault domain.
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata()
if err != nil {
return cloudprovider.Zone{}, err
}
if metadata.Compute == nil {
return cloudprovider.Zone{}, fmt.Errorf("failure of getting compute information from instance metadata")
}
zone := ""
if metadata.Compute.Zone != "" {
zoneID, err := strconv.Atoi(metadata.Compute.Zone)
if err != nil {
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone ID %q: %v", metadata.Compute.Zone, err)
}
zone = az.makeZone(zoneID)
} else {
klog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain")
zone = metadata.Compute.FaultDomain
}
return cloudprovider.Zone{
FailureDomain: zone,
Region: az.Location,
}, nil
}
// if UseInstanceMetadata is false, get Zone name by calling ARM
hostname, err := os.Hostname()
if err != nil {
return cloudprovider.Zone{}, fmt.Errorf("failure getting hostname from kernel")
}
return az.vmSet.GetZoneByNodeName(strings.ToLower(hostname))
}
// GetZoneByProviderID implements Zones.GetZoneByProviderID
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
if az.IsNodeUnmanagedByProviderID(providerID) {
klog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID)
return cloudprovider.Zone{}, nil
}
nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
return cloudprovider.Zone{}, err
}
return az.GetZoneByNodeName(ctx, nodeName)
}
// GetZoneByNodeName implements Zones.GetZoneByNodeName
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
unmanaged, err := az.IsNodeUnmanaged(string(nodeName))
if err != nil {
return cloudprovider.Zone{}, err
}
if unmanaged {
klog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName)
return cloudprovider.Zone{}, nil
}
return az.vmSet.GetZoneByNodeName(string(nodeName))
}
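GetZone above prefers the numeric availability zone reported by instance metadata, formatted as <region>-<zone-id> by makeZone, and falls back to the fault domain for non-zoned nodes. A small self-contained sketch of that decision; the region and metadata values are illustrative:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// pickZone mirrors the decision in Cloud.GetZone: prefer the numeric availability zone
// from instance metadata and format it as "<region>-<id>", otherwise fall back to the
// fault domain string.
func pickZone(region, metadataZone, faultDomain string) (string, error) {
	if metadataZone != "" {
		zoneID, err := strconv.Atoi(metadataZone)
		if err != nil {
			return "", fmt.Errorf("failed to parse zone ID %q: %v", metadataZone, err)
		}
		return fmt.Sprintf("%s-%d", strings.ToLower(region), zoneID), nil
	}
	return faultDomain, nil
}

func main() {
	fmt.Println(pickZone("eastus", "2", "1")) // zoned node: "eastus-2"
	fmt.Println(pickZone("eastus", "", "1"))  // non-zoned node: fault domain "1"
}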

View File

@@ -1,73 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"testing"
)
func TestIsAvailabilityZone(t *testing.T) {
location := "eastus"
az := &Cloud{
Config: Config{
Location: location,
},
}
tests := []struct {
desc string
zone string
expected bool
}{
{"empty string should return false", "", false},
{"wrong farmat should return false", "123", false},
{"wrong location should return false", "chinanorth-1", false},
{"correct zone should return true", "eastus-1", true},
}
for _, test := range tests {
actual := az.isAvailabilityZone(test.zone)
if actual != test.expected {
t.Errorf("test [%q] get unexpected result: %v != %v", test.desc, actual, test.expected)
}
}
}
func TestGetZoneID(t *testing.T) {
location := "eastus"
az := &Cloud{
Config: Config{
Location: location,
},
}
tests := []struct {
desc string
zone string
expected string
}{
{"empty string should return empty string", "", ""},
{"wrong farmat should return empty string", "123", ""},
{"wrong location should return empty string", "chinanorth-1", ""},
{"correct zone should return zone ID", "eastus-1", "1"},
}
for _, test := range tests {
actual := az.GetZoneID(test.zone)
if actual != test.expected {
t.Errorf("test [%q] get unexpected result: %q != %q", test.desc, actual, test.expected)
}
}
}

View File

@@ -19,11 +19,11 @@ package cloudprovider
import (
// Cloud providers
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/photon"
_ "k8s.io/legacy-cloud-providers/azure"
_ "k8s.io/legacy-cloud-providers/vsphere"
)
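Each of these blank imports registers a cloud provider by calling cloudprovider.RegisterCloudProvider from an init function, so the import path alone decides which implementation is compiled in and selectable via --cloud-provider. A hedged sketch of that registration pattern; the provider name and constructor below are illustrative and not copied from any of the packages above:

package myprovider

import (
	"io"

	cloudprovider "k8s.io/cloud-provider"
)

// providerName is an illustrative name used with --cloud-provider=myprovider.
const providerName = "myprovider"

func init() {
	// Blank-importing this package runs init and makes the provider available by name.
	cloudprovider.RegisterCloudProvider(providerName, func(config io.Reader) (cloudprovider.Interface, error) {
		return newMyProvider(config)
	})
}

// newMyProvider is a placeholder constructor; a real provider would parse its config here.
func newMyProvider(config io.Reader) (cloudprovider.Interface, error) {
	return nil, nil
}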