Merge pull request #29048 from justinsb/volumes_nodename_not_hostname

Automatic merge from submit-queue

Use strongly-typed types.NodeName for a node name

We had another bug where we confused the hostname with the NodeName.

Also, if we want to use different values for the Node.Name (which is
an important step for making installation easier), we need to keep
better control over this.

This is therefore a tedious but mechanical commit, changing all uses of the
node name over to types.NodeName.
Kubernetes Submit Queue 2016-09-27 17:58:41 -07:00 committed by GitHub
commit 1854bdcb0c
78 changed files with 998 additions and 777 deletions
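
For readers coming to this later: as far as I can tell, types.NodeName (in k8s.io/kubernetes/pkg/types) is simply a defined string type, so the benefit of this change is that the compiler now flags every place where a bare hostname is passed where a node name is expected. A minimal, self-contained sketch of the idea (illustrative only, not code from this PR):

package main

import "fmt"

// NodeName mirrors the idea behind k8s.io/kubernetes/pkg/types.NodeName:
// a defined string type, so a hostname can no longer silently stand in for
// a node name; every conversion is an explicit, reviewable cast.
type NodeName string

// nodeNameFromHostname is a hypothetical helper marking the one place where
// the "hostname == node name" assumption is made (true on GCE, not on AWS,
// where the node name is the EC2 private DNS name).
func nodeNameFromHostname(hostname string) NodeName {
	return NodeName(hostname)
}

func main() {
	fmt.Println(nodeNameFromHostname("ip-10-0-0-1.ec2.internal"))
}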

View File

@@ -16549,7 +16549,7 @@
 },
 "host": {
 "type": "string",
-"description": "Host name on which the event is generated."
+"description": "Node name on which the event is generated."
 }
 }
 },

View File

@@ -20,6 +20,7 @@ import (
 "fmt"
 "os"
+"github.com/golang/glog"
 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/api"
 kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
 "k8s.io/kubernetes/pkg/apis/certificates"
@@ -29,6 +30,7 @@ import (
 "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
 "k8s.io/kubernetes/pkg/kubelet/util/csr"
+"k8s.io/kubernetes/pkg/types"
 certutil "k8s.io/kubernetes/pkg/util/cert"
 )
@@ -37,11 +39,16 @@ func PerformTLSBootstrap(s *kubeadmapi.KubeadmConfig, apiEndpoint string, caCert
 // TODO(phase1+) try all the api servers until we find one that works
 bareClientConfig := kubeadmutil.CreateBasicClientConfig("kubernetes", apiEndpoint, caCert)
-nodeName, err := os.Hostname()
+hostName, err := os.Hostname()
 if err != nil {
 return nil, fmt.Errorf("<node/csr> failed to get node hostname [%v]", err)
 }
+// TODO: hostname == nodename doesn't hold on all clouds (AWS).
+// But we don't have a cloudprovider, so we're stuck.
+glog.Errorf("assuming that hostname is the same as NodeName")
+nodeName := types.NodeName(hostName)
 bootstrapClientConfig, err := clientcmd.NewDefaultClientConfig(
 *kubeadmutil.MakeClientConfigWithToken(
 bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), s.Secrets.BearerToken,

View File

@@ -30,6 +30,7 @@ import (
 "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
 "k8s.io/kubernetes/pkg/kubelet/util/csr"
+"k8s.io/kubernetes/pkg/types"
 certutil "k8s.io/kubernetes/pkg/util/cert"
 )
@@ -42,7 +43,7 @@ const (
 // The kubeconfig at bootstrapPath is used to request a client certificate from the API server.
 // On success, a kubeconfig file referencing the generated key and obtained certificate is written to kubeconfigPath.
 // The certificate and key file are stored in certDir.
-func bootstrapClientCert(kubeconfigPath string, bootstrapPath string, certDir string, nodeName string) error {
+func bootstrapClientCert(kubeconfigPath string, bootstrapPath string, certDir string, nodeName types.NodeName) error {
 // Short-circuit if the kubeconfig file already exists.
 // TODO: inspect the kubeconfig, ensure a rest client can be built from it, verify client cert expiration, etc.
 _, err := os.Stat(kubeconfigPath)

View File

@@ -61,6 +61,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/server"
 kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/runtime"
+"k8s.io/kubernetes/pkg/types"
 certutil "k8s.io/kubernetes/pkg/util/cert"
 utilconfig "k8s.io/kubernetes/pkg/util/config"
 "k8s.io/kubernetes/pkg/util/configz"
@@ -169,7 +170,7 @@ func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletD
 }
 configmap, err := func() (*api.ConfigMap, error) {
-var nodename string
+var nodename types.NodeName
 hostname := nodeutil.GetHostname(s.HostnameOverride)
 if kubeDeps != nil && kubeDeps.Cloud != nil {
@@ -460,9 +461,9 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
 // getNodeName returns the node name according to the cloud provider
 // if cloud provider is specified. Otherwise, returns the hostname of the node.
-func getNodeName(cloud cloudprovider.Interface, hostname string) (string, error) {
+func getNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName, error) {
 if cloud == nil {
-return hostname, nil
+return types.NodeName(hostname), nil
 }
 instances, ok := cloud.Instances()
@@ -607,7 +608,7 @@ func RunKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet
 }
 eventBroadcaster := record.NewBroadcaster()
-kubeDeps.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: nodeName})
+kubeDeps.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: string(nodeName)})
 eventBroadcaster.StartLogging(glog.V(3).Infof)
 if kubeDeps.EventClient != nil {
 glog.V(4).Infof("Sending events to api server.")
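
The getNodeName change above is the heart of the kubelet side of this PR: when a cloud provider is configured, the node name comes from the provider; only without one does the hostname win. A self-contained toy illustrating that fallback (the CloudNamer interface and the fakeAWS value are invented for illustration and are not kubelet code):

package main

import "fmt"

type NodeName string

// CloudNamer stands in for the CurrentNodeName method on cloudprovider.Instances;
// a nil value means "no cloud provider configured".
type CloudNamer interface {
	CurrentNodeName(hostname string) (NodeName, error)
}

type fakeAWS struct{}

func (fakeAWS) CurrentNodeName(hostname string) (NodeName, error) {
	// On AWS the node name is the EC2 private DNS name, which may differ
	// from the OS hostname; hard-coded here purely for the demo.
	return NodeName("ip-10-0-0-1.ec2.internal"), nil
}

// getNodeName mirrors the shape of the kubelet helper in the hunk above.
func getNodeName(cloud CloudNamer, hostname string) (NodeName, error) {
	if cloud == nil {
		return NodeName(hostname), nil
	}
	return cloud.CurrentNodeName(hostname)
}

func main() {
	fmt.Println(getNodeName(nil, "myhost"))       // no cloud provider: hostname wins
	fmt.Println(getNodeName(fakeAWS{}, "myhost")) // the cloud provider decides
}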

View File

@@ -44,6 +44,7 @@ import (
 kconfig "k8s.io/kubernetes/pkg/kubelet/config"
 "k8s.io/kubernetes/pkg/kubelet/dockertools"
 kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
+"k8s.io/kubernetes/pkg/types"
 )
 // TODO(jdef): passing the value of envContainerID to all docker containers instantiated

View File

@@ -5008,7 +5008,7 @@ The resulting set of endpoints can be viewed as:<br>
 </tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">host</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">Host name on which the event is generated.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">Node name on which the event is generated.</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
 <td class="tableblock halign-left valign-top"></td>

View File

@@ -2568,7 +2568,7 @@ type SerializedReference struct {
 type EventSource struct {
 // Component from which the event is generated.
 Component string `json:"component,omitempty"`
-// Host name on which the event is generated.
+// Node name on which the event is generated.
 Host string `json:"host,omitempty"`
 }

View File

@@ -769,7 +769,7 @@ message EventSource {
 // Component from which the event is generated.
 optional string component = 1;
-// Host name on which the event is generated.
+// Node name on which the event is generated.
 optional string host = 2;
 }

View File

@@ -3017,7 +3017,7 @@ type SerializedReference struct {
 type EventSource struct {
 // Component from which the event is generated.
 Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
-// Host name on which the event is generated.
+// Node name on which the event is generated.
 Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
 }

View File

@@ -477,7 +477,7 @@ func (EventList) SwaggerDoc() map[string]string {
 var map_EventSource = map[string]string{
 "": "EventSource contains information for an event.",
 "component": "Component from which the event is generated.",
-"host": "Host name on which the event is generated.",
+"host": "Node name on which the event is generated.",
 }
 func (EventSource) SwaggerDoc() map[string]string {

View File

@@ -22,6 +22,7 @@ import (
 "strings"
 "k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/types"
 )
 // Interface is an abstract, pluggable interface for cloud providers.
@@ -63,7 +64,7 @@ func GetLoadBalancerName(service *api.Service) string {
 return ret
 }
-func GetInstanceProviderID(cloud Interface, nodeName string) (string, error) {
+func GetInstanceProviderID(cloud Interface, nodeName types.NodeName) (string, error) {
 instances, ok := cloud.Instances()
 if !ok {
 return "", fmt.Errorf("failed to get instances from cloud provider")
@@ -86,11 +87,11 @@ type LoadBalancer interface {
 // EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
 // Implementations must treat the *api.Service parameter as read-only and not modify it.
 // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
-EnsureLoadBalancer(clusterName string, service *api.Service, hosts []string) (*api.LoadBalancerStatus, error)
+EnsureLoadBalancer(clusterName string, service *api.Service, nodeNames []string) (*api.LoadBalancerStatus, error)
 // UpdateLoadBalancer updates hosts under the specified load balancer.
 // Implementations must treat the *api.Service parameter as read-only and not modify it.
 // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
-UpdateLoadBalancer(clusterName string, service *api.Service, hosts []string) error
+UpdateLoadBalancer(clusterName string, service *api.Service, nodeNames []string) error
 // EnsureLoadBalancerDeleted deletes the specified load balancer if it
 // exists, returning nil if the load balancer specified either didn't exist or
 // was successfully deleted.
@@ -108,22 +109,22 @@ type Instances interface {
 // TODO(roberthbailey): This currently is only used in such a way that it
 // returns the address of the calling instance. We should do a rename to
 // make this clearer.
-NodeAddresses(name string) ([]api.NodeAddress, error)
+NodeAddresses(name types.NodeName) ([]api.NodeAddress, error)
-// ExternalID returns the cloud provider ID of the specified instance (deprecated).
+// ExternalID returns the cloud provider ID of the node with the specified NodeName.
 // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
-ExternalID(name string) (string, error)
+ExternalID(nodeName types.NodeName) (string, error)
-// InstanceID returns the cloud provider ID of the specified instance.
+// InstanceID returns the cloud provider ID of the node with the specified NodeName.
-InstanceID(name string) (string, error)
+InstanceID(nodeName types.NodeName) (string, error)
 // InstanceType returns the type of the specified instance.
-InstanceType(name string) (string, error)
+InstanceType(name types.NodeName) (string, error)
 // List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn)
-List(filter string) ([]string, error)
+List(filter string) ([]types.NodeName, error)
 // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
 // expected format for the key is standard ssh-keygen format: <protocol> <blob>
 AddSSHKeyToAllInstances(user string, keyData []byte) error
 // CurrentNodeName returns the name of the node we are currently running on
 // On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
-CurrentNodeName(hostname string) (string, error)
+CurrentNodeName(hostname string) (types.NodeName, error)
 }
 // Route is a representation of an advanced routing rule.
@@ -131,9 +132,8 @@ type Route struct {
 // Name is the name of the routing rule in the cloud-provider.
 // It will be ignored in a Create (although nameHint may influence it)
 Name string
-// TargetInstance is the name of the instance as specified in routing rules
-// for the cloud-provider (in gce: the Instance Name).
-TargetInstance string
+// TargetNode is the NodeName of the target instance.
+TargetNode types.NodeName
 // DestinationCIDR is the CIDR format IP range that this routing rule
 // applies to.
 DestinationCIDR string
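
To make the interface change concrete: once Instances methods and maps are keyed by the NodeName type rather than string, a stray hostname cannot be used as a key or argument without an explicit conversion. A small self-contained toy in that spirit (FakeInstances and its data are invented, not part of the PR):

package main

import "fmt"

type NodeName string

// FakeInstances keeps addresses keyed by NodeName, the same way the AWS
// provider's attaching map becomes map[types.NodeName]... in this PR.
type FakeInstances struct {
	addrs map[NodeName][]string
}

// NodeAddresses has the same shape as Instances.NodeAddresses(name types.NodeName).
func (f *FakeInstances) NodeAddresses(name NodeName) ([]string, error) {
	a, ok := f.addrs[name]
	if !ok {
		return nil, fmt.Errorf("instance not found: %s", name)
	}
	return a, nil
}

func main() {
	f := &FakeInstances{addrs: map[NodeName][]string{
		"ip-10-0-0-1.ec2.internal": {"10.0.0.1"},
	}}
	// The explicit NodeName cast is now required at the call site.
	addrs, err := f.NodeAddresses(NodeName("ip-10-0-0-1.ec2.internal"))
	fmt.Println(addrs, err)
}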

View File

@@ -296,14 +296,14 @@ type VolumeOptions struct {
 // Volumes is an interface for managing cloud-provisioned volumes
 // TODO: Allow other clouds to implement this
 type Volumes interface {
-// Attach the disk to the specified instance
+// Attach the disk to the node with the specified NodeName
-// instanceName can be empty to mean "the instance on which we are running"
+// nodeName can be empty to mean "the instance on which we are running"
 // Returns the device (e.g. /dev/xvdf) where we attached the volume
-AttachDisk(diskName string, instanceName string, readOnly bool) (string, error)
+AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) (string, error)
-// Detach the disk from the specified instance
+// Detach the disk from the node with the specified NodeName
-// instanceName can be empty to mean "the instance on which we are running"
+// nodeName can be empty to mean "the instance on which we are running"
 // Returns the device where the volume was attached
-DetachDisk(diskName string, instanceName string) (string, error)
+DetachDisk(diskName string, nodeName types.NodeName) (string, error)
 // Create a volume with the specified options
 CreateDisk(volumeOptions *VolumeOptions) (volumeName string, err error)
@@ -319,8 +319,8 @@ type Volumes interface {
 // return the device path where the volume is attached
 GetDiskPath(volumeName string) (string, error)
-// Check if the volume is already attached to the instance
+// Check if the volume is already attached to the node with the specified NodeName
-DiskIsAttached(diskName, instanceID string) (bool, error)
+DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error)
 }
 // InstanceGroups is an interface for managing cloud-managed instance groups / autoscaling instance groups
@@ -362,7 +362,7 @@ type Cloud struct {
 // attached, to avoid a race condition where we assign a device mapping
 // and then get a second request before we attach the volume
 attachingMutex sync.Mutex
-attaching map[ /*nodeName*/ string]map[mountDevice]string
+attaching map[types.NodeName]map[mountDevice]string
 }
 var _ Volumes = &Cloud{}
@@ -537,7 +537,7 @@ func (c *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
 }
 // CurrentNodeName returns the name of the current node
-func (c *Cloud) CurrentNodeName(hostname string) (string, error) {
+func (c *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
 return c.selfAWSInstance.nodeName, nil
 }
@@ -795,7 +795,7 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
 cfg: cfg,
 region: regionName,
-attaching: make(map[string]map[mountDevice]string),
+attaching: make(map[types.NodeName]map[mountDevice]string),
 }
 selfAWSInstance, err := awsCloud.buildSelfAWSInstance()
@@ -877,7 +877,7 @@ func (c *Cloud) Routes() (cloudprovider.Routes, bool) {
 }
 // NodeAddresses is an implementation of Instances.NodeAddresses.
-func (c *Cloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
+func (c *Cloud) NodeAddresses(name types.NodeName) ([]api.NodeAddress, error) {
 if c.selfAWSInstance.nodeName == name || len(name) == 0 {
 addresses := []api.NodeAddress{}
@@ -932,15 +932,15 @@ func (c *Cloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
 return addresses, nil
 }
-// ExternalID returns the cloud provider ID of the specified instance (deprecated).
+// ExternalID returns the cloud provider ID of the node with the specified nodeName (deprecated).
-func (c *Cloud) ExternalID(name string) (string, error) {
+func (c *Cloud) ExternalID(nodeName types.NodeName) (string, error) {
-if c.selfAWSInstance.nodeName == name {
+if c.selfAWSInstance.nodeName == nodeName {
 // We assume that if this is run on the instance itself, the instance exists and is alive
 return c.selfAWSInstance.awsID, nil
 }
 // We must verify that the instance still exists
 // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
-instance, err := c.findInstanceByNodeName(name)
+instance, err := c.findInstanceByNodeName(nodeName)
 if err != nil {
 return "", err
 }
@@ -950,34 +950,34 @@ func (c *Cloud) ExternalID(name string) (string, error) {
 return orEmpty(instance.InstanceId), nil
 }
-// InstanceID returns the cloud provider ID of the specified instance.
+// InstanceID returns the cloud provider ID of the node with the specified nodeName.
-func (c *Cloud) InstanceID(name string) (string, error) {
+func (c *Cloud) InstanceID(nodeName types.NodeName) (string, error) {
 // In the future it is possible to also return an endpoint as:
 // <endpoint>/<zone>/<instanceid>
-if c.selfAWSInstance.nodeName == name {
+if c.selfAWSInstance.nodeName == nodeName {
 return "/" + c.selfAWSInstance.availabilityZone + "/" + c.selfAWSInstance.awsID, nil
 }
-inst, err := c.getInstanceByNodeName(name)
+inst, err := c.getInstanceByNodeName(nodeName)
 if err != nil {
-return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", name, err)
+return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", nodeName, err)
 }
 return "/" + orEmpty(inst.Placement.AvailabilityZone) + "/" + orEmpty(inst.InstanceId), nil
 }
-// InstanceType returns the type of the specified instance.
+// InstanceType returns the type of the node with the specified nodeName.
-func (c *Cloud) InstanceType(name string) (string, error) {
+func (c *Cloud) InstanceType(nodeName types.NodeName) (string, error) {
-if c.selfAWSInstance.nodeName == name {
+if c.selfAWSInstance.nodeName == nodeName {
 return c.selfAWSInstance.instanceType, nil
 }
-inst, err := c.getInstanceByNodeName(name)
+inst, err := c.getInstanceByNodeName(nodeName)
 if err != nil {
-return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", name, err)
+return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", nodeName, err)
 }
 return orEmpty(inst.InstanceType), nil
 }
 // Return a list of instances matching regex string.
-func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) {
+func (c *Cloud) getInstancesByRegex(regex string) ([]types.NodeName, error) {
 filters := []*ec2.Filter{newEc2Filter("instance-state-name", "running")}
 filters = c.addFilters(filters)
 request := &ec2.DescribeInstancesInput{
@@ -986,10 +986,10 @@ func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) {
 instances, err := c.ec2.DescribeInstances(request)
 if err != nil {
-return []string{}, err
+return []types.NodeName{}, err
 }
 if len(instances) == 0 {
-return []string{}, fmt.Errorf("no instances returned")
+return []types.NodeName{}, fmt.Errorf("no instances returned")
 }
 if strings.HasPrefix(regex, "'") && strings.HasSuffix(regex, "'") {
@@ -999,10 +999,10 @@ func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) {
 re, err := regexp.Compile(regex)
 if err != nil {
-return []string{}, err
+return []types.NodeName{}, err
 }
-matchingInstances := []string{}
+matchingInstances := []types.NodeName{}
 for _, instance := range instances {
 // Only return fully-ready instances when listing instances
 // (vs a query by name, where we will return it if we find it)
@@ -1011,16 +1011,16 @@ func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) {
 continue
 }
-privateDNSName := orEmpty(instance.PrivateDnsName)
+nodeName := mapInstanceToNodeName(instance)
-if privateDNSName == "" {
+if nodeName == "" {
 glog.V(2).Infof("Skipping EC2 instance (no PrivateDNSName): %s",
-orEmpty(instance.InstanceId))
+aws.StringValue(instance.InstanceId))
 continue
 }
 for _, tag := range instance.Tags {
 if orEmpty(tag.Key) == "Name" && re.MatchString(orEmpty(tag.Value)) {
-matchingInstances = append(matchingInstances, privateDNSName)
+matchingInstances = append(matchingInstances, nodeName)
 break
 }
 }
@@ -1030,7 +1030,7 @@ func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) {
 }
 // List is an implementation of Instances.List.
-func (c *Cloud) List(filter string) ([]string, error) {
+func (c *Cloud) List(filter string) ([]types.NodeName, error) {
 // TODO: Should really use tag query. No need to go regexp.
 return c.getInstancesByRegex(filter)
 }
@@ -1102,7 +1102,7 @@ type awsInstance struct {
 awsID string
 // node name in k8s
-nodeName string
+nodeName types.NodeName
 // availability zone the instance resides in
 availabilityZone string
@@ -1126,7 +1126,7 @@ func newAWSInstance(ec2Service EC2, instance *ec2.Instance) *awsInstance {
 self := &awsInstance{
 ec2: ec2Service,
 awsID: aws.StringValue(instance.InstanceId),
-nodeName: aws.StringValue(instance.PrivateDnsName),
+nodeName: mapInstanceToNodeName(instance),
 availabilityZone: az,
 instanceType: aws.StringValue(instance.InstanceType),
 vpcID: aws.StringValue(instance.VpcId),
@@ -1436,8 +1436,8 @@ func (c *Cloud) buildSelfAWSInstance() (*awsInstance, error) {
 return newAWSInstance(c.ec2, instance), nil
 }
-// Gets the awsInstance with node-name nodeName, or the 'self' instance if nodeName == ""
+// Gets the awsInstance with for the node with the specified nodeName, or the 'self' instance if nodeName == ""
-func (c *Cloud) getAwsInstance(nodeName string) (*awsInstance, error) {
+func (c *Cloud) getAwsInstance(nodeName types.NodeName) (*awsInstance, error) {
 var awsInstance *awsInstance
 if nodeName == "" {
 awsInstance = c.selfAWSInstance
@@ -1454,15 +1454,15 @@ func (c *Cloud) getAwsInstance(nodeName string) (*awsInstance, error) {
 }
 // AttachDisk implements Volumes.AttachDisk
-func (c *Cloud) AttachDisk(diskName string, instanceName string, readOnly bool) (string, error) {
+func (c *Cloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) (string, error) {
 disk, err := newAWSDisk(c, diskName)
 if err != nil {
 return "", err
 }
-awsInstance, err := c.getAwsInstance(instanceName)
+awsInstance, err := c.getAwsInstance(nodeName)
 if err != nil {
-return "", fmt.Errorf("error finding instance %s: %v", instanceName, err)
+return "", fmt.Errorf("error finding instance %s: %v", nodeName, err)
 }
 if readOnly {
@@ -1528,32 +1528,32 @@ func (c *Cloud) AttachDisk(diskName string, instanceName string, readOnly bool)
 // which could theoretically be against a different device (or even instance).
 if attachment == nil {
 // Impossible?
-return "", fmt.Errorf("unexpected state: attachment nil after attached %q to %q", diskName, instanceName)
+return "", fmt.Errorf("unexpected state: attachment nil after attached %q to %q", diskName, nodeName)
 }
 if ec2Device != aws.StringValue(attachment.Device) {
-return "", fmt.Errorf("disk attachment of %q to %q failed: requested device %q but found %q", diskName, instanceName, ec2Device, aws.StringValue(attachment.Device))
+return "", fmt.Errorf("disk attachment of %q to %q failed: requested device %q but found %q", diskName, nodeName, ec2Device, aws.StringValue(attachment.Device))
 }
 if awsInstance.awsID != aws.StringValue(attachment.InstanceId) {
-return "", fmt.Errorf("disk attachment of %q to %q failed: requested instance %q but found %q", diskName, instanceName, awsInstance.awsID, aws.StringValue(attachment.InstanceId))
+return "", fmt.Errorf("disk attachment of %q to %q failed: requested instance %q but found %q", diskName, nodeName, awsInstance.awsID, aws.StringValue(attachment.InstanceId))
 }
 return hostDevice, nil
 }
 // DetachDisk implements Volumes.DetachDisk
-func (c *Cloud) DetachDisk(diskName string, instanceName string) (string, error) {
+func (c *Cloud) DetachDisk(diskName string, nodeName types.NodeName) (string, error) {
 disk, err := newAWSDisk(c, diskName)
 if err != nil {
 return "", err
 }
-awsInstance, err := c.getAwsInstance(instanceName)
+awsInstance, err := c.getAwsInstance(nodeName)
 if err != nil {
 if err == cloudprovider.InstanceNotFound {
 // If instance no longer exists, safe to assume volume is not attached.
 glog.Warningf(
 "Instance %q does not exist. DetachDisk will assume disk %q is not attached to it.",
-instanceName,
+nodeName,
 diskName)
 return "", nil
 }
@@ -1743,14 +1743,14 @@ func (c *Cloud) GetDiskPath(volumeName string) (string, error) {
 }
 // DiskIsAttached implements Volumes.DiskIsAttached
-func (c *Cloud) DiskIsAttached(diskName, instanceID string) (bool, error) {
+func (c *Cloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
-awsInstance, err := c.getAwsInstance(instanceID)
+awsInstance, err := c.getAwsInstance(nodeName)
 if err != nil {
 if err == cloudprovider.InstanceNotFound {
 // If instance no longer exists, safe to assume volume is not attached.
 glog.Warningf(
 "Instance %q does not exist. DiskIsAttached will assume disk %q is not attached to it.",
-instanceID,
+nodeName,
 diskName)
 return false, nil
 }
@@ -3184,11 +3184,23 @@ func (c *Cloud) getInstancesByNodeNamesCached(nodeNames sets.String) ([]*ec2.Ins
 return instances, nil
 }
+// mapNodeNameToPrivateDNSName maps a k8s NodeName to an AWS Instance PrivateDNSName
+// This is a simple string cast
+func mapNodeNameToPrivateDNSName(nodeName types.NodeName) string {
+return string(nodeName)
+}
+// mapInstanceToNodeName maps a EC2 instance to a k8s NodeName, by extracting the PrivateDNSName
+func mapInstanceToNodeName(i *ec2.Instance) types.NodeName {
+return types.NodeName(aws.StringValue(i.PrivateDnsName))
+}
 // Returns the instance with the specified node name
 // Returns nil if it does not exist
-func (c *Cloud) findInstanceByNodeName(nodeName string) (*ec2.Instance, error) {
+func (c *Cloud) findInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error) {
+privateDNSName := mapNodeNameToPrivateDNSName(nodeName)
 filters := []*ec2.Filter{
-newEc2Filter("private-dns-name", nodeName),
+newEc2Filter("private-dns-name", privateDNSName),
 newEc2Filter("instance-state-name", "running"),
 }
 filters = c.addFilters(filters)
@@ -3211,7 +3223,7 @@ func (c *Cloud) findInstanceByNodeName(nodeName string) (*ec2.Instance, error) {
 // Returns the instance with the specified node name
 // Like findInstanceByNodeName, but returns error if node not found
-func (c *Cloud) getInstanceByNodeName(nodeName string) (*ec2.Instance, error) {
+func (c *Cloud) getInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error) {
 instance, err := c.findInstanceByNodeName(nodeName)
 if err == nil && instance == nil {
 return nil, cloudprovider.InstanceNotFound

View File

@@ -86,9 +86,9 @@ func (c *Cloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
 glog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID)
 continue
 }
-instanceName := orEmpty(instance.PrivateDnsName)
+nodeName := mapInstanceToNodeName(instance)
 routeName := clusterName + "-" + destinationCIDR
-routes = append(routes, &cloudprovider.Route{Name: routeName, TargetInstance: instanceName, DestinationCIDR: destinationCIDR})
+routes = append(routes, &cloudprovider.Route{Name: routeName, TargetNode: nodeName, DestinationCIDR: destinationCIDR})
 }
 return routes, nil
@@ -110,7 +110,7 @@ func (c *Cloud) configureInstanceSourceDestCheck(instanceID string, sourceDestCh
 // CreateRoute implements Routes.CreateRoute
 // Create the described route
 func (c *Cloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
-instance, err := c.getInstanceByNodeName(route.TargetInstance)
+instance, err := c.getInstanceByNodeName(route.TargetNode)
 if err != nil {
 return err
 }

View File

@@ -34,6 +34,7 @@ import (
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"
+"k8s.io/kubernetes/pkg/types"
 )
 const TestClusterId = "clusterid.test"
@@ -605,11 +606,11 @@ func TestList(t *testing.T) {
 table := []struct {
 input string
-expect []string
+expect []types.NodeName
 }{
-{"blahonga", []string{}},
+{"blahonga", []types.NodeName{}},
-{"quux", []string{"instance3.ec2.internal"}},
+{"quux", []types.NodeName{"instance3.ec2.internal"}},
-{"a", []string{"instance1.ec2.internal", "instance2.ec2.internal"}},
+{"a", []types.NodeName{"instance1.ec2.internal", "instance2.ec2.internal"}},
 }
 for _, item := range table {
@@ -705,7 +706,7 @@ func TestNodeAddresses(t *testing.T) {
 fakeServices.selfInstance.PublicIpAddress = aws.String("2.3.4.5")
 fakeServices.selfInstance.PrivateIpAddress = aws.String("192.168.0.2")
-addrs4, err4 := aws4.NodeAddresses(*instance0.PrivateDnsName)
+addrs4, err4 := aws4.NodeAddresses(mapInstanceToNodeName(&instance0))
 if err4 != nil {
 t.Errorf("unexpected error: %v", err4)
 }
@@ -1062,7 +1063,7 @@ func TestIpPermissionExistsHandlesMultipleGroupIdsWithUserIds(t *testing.T) {
 func TestFindInstanceByNodeNameExcludesTerminatedInstances(t *testing.T) {
 awsServices := NewFakeAWSServices()
-nodeName := "my-dns.internal"
+nodeName := types.NodeName("my-dns.internal")
 var tag ec2.Tag
 tag.Key = aws.String(TagNameKubernetesCluster)
@@ -1071,13 +1072,13 @@ func TestFindInstanceByNodeNameExcludesTerminatedInstances(t *testing.T) {
 var runningInstance ec2.Instance
 runningInstance.InstanceId = aws.String("i-running")
-runningInstance.PrivateDnsName = aws.String(nodeName)
+runningInstance.PrivateDnsName = aws.String(string(nodeName))
 runningInstance.State = &ec2.InstanceState{Code: aws.Int64(16), Name: aws.String("running")}
 runningInstance.Tags = tags
 var terminatedInstance ec2.Instance
 terminatedInstance.InstanceId = aws.String("i-terminated")
-terminatedInstance.PrivateDnsName = aws.String(nodeName)
+terminatedInstance.PrivateDnsName = aws.String(string(nodeName))
 terminatedInstance.State = &ec2.InstanceState{Code: aws.Int64(48), Name: aws.String("terminated")}
 terminatedInstance.Tags = tags

View File

@@ -24,10 +24,11 @@ import (
 "k8s.io/kubernetes/pkg/cloudprovider"
 "github.com/Azure/azure-sdk-for-go/arm/compute"
+"k8s.io/kubernetes/pkg/types"
 )
 // NodeAddresses returns the addresses of the specified instance.
-func (az *Cloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
+func (az *Cloud) NodeAddresses(name types.NodeName) ([]api.NodeAddress, error) {
 ip, err := az.getIPForMachine(name)
 if err != nil {
 return nil, err
@@ -35,18 +36,18 @@ func (az *Cloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
 return []api.NodeAddress{
 {Type: api.NodeInternalIP, Address: ip},
-{Type: api.NodeHostName, Address: name},
+{Type: api.NodeHostName, Address: string(name)},
 }, nil
 }
 // ExternalID returns the cloud provider ID of the specified instance (deprecated).
-func (az *Cloud) ExternalID(name string) (string, error) {
+func (az *Cloud) ExternalID(name types.NodeName) (string, error) {
 return az.InstanceID(name)
 }
 // InstanceID returns the cloud provider ID of the specified instance.
 // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
-func (az *Cloud) InstanceID(name string) (string, error) {
+func (az *Cloud) InstanceID(name types.NodeName) (string, error) {
 machine, exists, err := az.getVirtualMachine(name)
 if err != nil {
 return "", err
@@ -60,7 +61,7 @@ func (az *Cloud) InstanceID(name string) (string, error) {
 // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
 // (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
 // Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
-func (az *Cloud) InstanceType(name string) (string, error) {
+func (az *Cloud) InstanceType(name types.NodeName) (string, error) {
 machine, exists, err := az.getVirtualMachine(name)
 if err != nil {
 return "", err
@@ -71,7 +72,7 @@ func (az *Cloud) InstanceType(name string) (string, error) {
 }
 // List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn)
-func (az *Cloud) List(filter string) ([]string, error) {
+func (az *Cloud) List(filter string) ([]types.NodeName, error) {
 allNodes, err := az.listAllNodesInResourceGroup()
 if err != nil {
 return nil, err
@@ -82,9 +83,9 @@ func (az *Cloud) List(filter string) ([]string, error) {
 return nil, err
 }
-nodeNames := make([]string, len(filteredNodes))
+nodeNames := make([]types.NodeName, len(filteredNodes))
 for i, v := range filteredNodes {
-nodeNames[i] = *v.Name
+nodeNames[i] = types.NodeName(*v.Name)
 }
 return nodeNames, nil
@@ -98,8 +99,8 @@ func (az *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
 // CurrentNodeName returns the name of the node we are currently running on
 // On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
-func (az *Cloud) CurrentNodeName(hostname string) (string, error) {
+func (az *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
-return hostname, nil
+return types.NodeName(hostname), nil
 }
 func (az *Cloud) listAllNodesInResourceGroup() ([]compute.VirtualMachine, error) {
@@ -144,3 +145,15 @@ func filterNodes(nodes []compute.VirtualMachine, filter string) ([]compute.Virtu
 return filteredNodes, nil
 }
+// mapNodeNameToVMName maps a k8s NodeName to an Azure VM Name
+// This is a simple string cast.
+func mapNodeNameToVMName(nodeName types.NodeName) string {
+return string(nodeName)
+}
+// mapVMNameToNodeName maps an Azure VM Name to a k8s NodeName
+// This is a simple string cast.
+func mapVMNameToNodeName(vmName string) types.NodeName {
+return types.NodeName(vmName)
+}

View File

@@ -27,6 +27,7 @@ import (
 "github.com/Azure/azure-sdk-for-go/arm/network"
 "github.com/Azure/go-autorest/autorest/to"
 "github.com/golang/glog"
+"k8s.io/kubernetes/pkg/types"
 )
 // GetLoadBalancer returns whether the specified load balancer exists, and
@@ -60,7 +61,7 @@ func (az *Cloud) GetLoadBalancer(clusterName string, service *api.Service) (stat
 }
 // EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
-func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, hosts []string) (*api.LoadBalancerStatus, error) {
+func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, nodeNames []string) (*api.LoadBalancerStatus, error) {
 lbName := getLoadBalancerName(clusterName)
 pipName := getPublicIPName(clusterName, service)
 serviceName := getServiceName(service)
@@ -99,7 +100,7 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, ho
 }
 }
-lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, pip, clusterName, service, hosts)
+lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, pip, clusterName, service, nodeNames)
 if err != nil {
 return nil, err
 }
@@ -114,11 +115,11 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, ho
 // Add the machines to the backend pool if they're not already
 lbBackendName := getBackendPoolName(clusterName)
 lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendName)
-hostUpdates := make([]func() error, len(hosts))
+hostUpdates := make([]func() error, len(nodeNames))
-for i, host := range hosts {
+for i, nodeName := range nodeNames {
-localHost := host
+localNodeName := nodeName
 f := func() error {
-err := az.ensureHostInPool(serviceName, localHost, lbBackendPoolID)
+err := az.ensureHostInPool(serviceName, types.NodeName(localNodeName), lbBackendPoolID)
 if err != nil {
 return fmt.Errorf("ensure(%s): lb(%s) - failed to ensure host in pool: %q", serviceName, lbName, err)
 }
@@ -139,8 +140,8 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, ho
 }
 // UpdateLoadBalancer updates hosts under the specified load balancer.
-func (az *Cloud) UpdateLoadBalancer(clusterName string, service *api.Service, hosts []string) error {
+func (az *Cloud) UpdateLoadBalancer(clusterName string, service *api.Service, nodeNames []string) error {
-_, err := az.EnsureLoadBalancer(clusterName, service, hosts)
+_, err := az.EnsureLoadBalancer(clusterName, service, nodeNames)
 return err
 }
@@ -257,7 +258,7 @@ func (az *Cloud) ensurePublicIPDeleted(serviceName, pipName string) error {
 // This ensures load balancer exists and the frontend ip config is setup.
 // This also reconciles the Service's Ports with the LoadBalancer config.
 // This entails adding rules/probes for expected Ports and removing stale rules/ports.
-func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.PublicIPAddress, clusterName string, service *api.Service, hosts []string) (network.LoadBalancer, bool, error) {
+func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.PublicIPAddress, clusterName string, service *api.Service, nodeNames []string) (network.LoadBalancer, bool, error) {
 lbName := getLoadBalancerName(clusterName)
 serviceName := getServiceName(service)
 lbFrontendIPConfigName := getFrontendIPConfigName(service)
@@ -556,8 +557,9 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b
 // This ensures the given VM's Primary NIC's Primary IP Configuration is
 // participating in the specified LoadBalancer Backend Pool.
-func (az *Cloud) ensureHostInPool(serviceName, machineName string, backendPoolID string) error {
+func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string) error {
-machine, err := az.VirtualMachinesClient.Get(az.ResourceGroup, machineName, "")
+vmName := mapNodeNameToVMName(nodeName)
+machine, err := az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "")
 if err != nil {
 return err
 }

View File

@ -24,6 +24,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/Azure/go-autorest/autorest/to" "github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
) )
// ListRoutes lists all managed routes that belong to the specified clusterName // ListRoutes lists all managed routes that belong to the specified clusterName
@ -41,13 +42,13 @@ func (az *Cloud) ListRoutes(clusterName string) (routes []*cloudprovider.Route,
if routeTable.Properties.Routes != nil { if routeTable.Properties.Routes != nil {
kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Properties.Routes)) kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Properties.Routes))
for i, route := range *routeTable.Properties.Routes { for i, route := range *routeTable.Properties.Routes {
instance := getInstanceName(*route.Name) instance := mapRouteNameToNodeName(*route.Name)
cidr := *route.Properties.AddressPrefix cidr := *route.Properties.AddressPrefix
glog.V(10).Infof("list: * instance=%q, cidr=%q", instance, cidr) glog.V(10).Infof("list: * instance=%q, cidr=%q", instance, cidr)
kubeRoutes[i] = &cloudprovider.Route{ kubeRoutes[i] = &cloudprovider.Route{
Name: *route.Name, Name: *route.Name,
TargetInstance: instance, TargetNode: instance,
DestinationCIDR: cidr, DestinationCIDR: cidr,
} }
} }
@ -61,7 +62,7 @@ func (az *Cloud) ListRoutes(clusterName string) (routes []*cloudprovider.Route,
// route.Name will be ignored, although the cloud-provider may use nameHint // route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name. // to create a more user-meaningful name.
func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error { func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetInstance, kubeRoute.DestinationCIDR) glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
routeTable, existsRouteTable, err := az.getRouteTable() routeTable, existsRouteTable, err := az.getRouteTable()
if err != nil { if err != nil {
@ -107,12 +108,12 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo
} }
} }
targetIP, err := az.getIPForMachine(kubeRoute.TargetInstance) targetIP, err := az.getIPForMachine(kubeRoute.TargetNode)
if err != nil { if err != nil {
return err return err
} }
routeName := getRouteName(kubeRoute.TargetInstance) routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
route := network.Route{ route := network.Route{
Name: to.StringPtr(routeName), Name: to.StringPtr(routeName),
Properties: &network.RoutePropertiesFormat{ Properties: &network.RoutePropertiesFormat{
@ -122,40 +123,40 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo
}, },
} }
glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetInstance, kubeRoute.DestinationCIDR) glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
_, err = az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) _, err = az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
if err != nil { if err != nil {
return err return err
} }
glog.V(2).Infof("create: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetInstance, kubeRoute.DestinationCIDR) glog.V(2).Infof("create: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return nil return nil
} }
// DeleteRoute deletes the specified managed route // DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes // Route should be as returned by ListRoutes
func (az *Cloud) DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) error { func (az *Cloud) DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) error {
glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetInstance, kubeRoute.DestinationCIDR) glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
routeName := getRouteName(kubeRoute.TargetInstance) routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
_, err := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) _, err := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
if err != nil { if err != nil {
return err return err
} }
glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetInstance, kubeRoute.DestinationCIDR) glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return nil return nil
} }
// This must be kept in sync with getInstanceName. // This must be kept in sync with mapRouteNameToNodeName.
// These two functions enable stashing the instance name in the route // These two functions enable stashing the instance name in the route
// and then retrieving it later when listing. This is needed because // and then retrieving it later when listing. This is needed because
// Azure does not let you put tags/descriptions on the Route itself. // Azure does not let you put tags/descriptions on the Route itself.
func getRouteName(instanceName string) string { func mapNodeNameToRouteName(nodeName types.NodeName) string {
return fmt.Sprintf("%s", instanceName) return fmt.Sprintf("%s", nodeName)
} }
// Used with getRouteName. See comment on getRouteName. // Used with mapNodeNameToRouteName. See comment on mapNodeNameToRouteName.
func getInstanceName(routeName string) string { func mapRouteNameToNodeName(routeName string) types.NodeName {
return fmt.Sprintf("%s", routeName) return types.NodeName(fmt.Sprintf("%s", routeName))
} }
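
The two helpers above only work because the conversion is lossless in both directions: the node name is stashed verbatim in the route name and recovered unchanged when listing, since Azure routes cannot carry tags or descriptions. A minimal standalone sketch of that roundtrip, using a local NodeName alias in place of k8s.io/kubernetes/pkg/types.NodeName (assumed here to be a plain string alias):

package main

import "fmt"

// NodeName stands in for k8s.io/kubernetes/pkg/types.NodeName (a string alias).
type NodeName string

// mapNodeNameToRouteName stashes the node name in the Azure route name.
func mapNodeNameToRouteName(nodeName NodeName) string {
	return string(nodeName)
}

// mapRouteNameToNodeName recovers the node name when listing routes.
func mapRouteNameToNodeName(routeName string) NodeName {
	return NodeName(routeName)
}

func main() {
	original := NodeName("aks-node-0")
	routeName := mapNodeNameToRouteName(original)
	recovered := mapRouteNameToNodeName(routeName)
	fmt.Println(routeName, recovered == original) // aks-node-0 true
}

Keeping the two functions next to each other, as the diff does, makes it harder to change one side of the encoding without the other.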

View File

@ -23,6 +23,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
) )
const ( const (
@ -31,8 +32,8 @@ const (
// AttachDisk attaches a vhd to vm // AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun. // the vhd must exist, can be identified by diskName, diskURI, and lun.
func (az *Cloud) AttachDisk(diskName, diskURI, vmName string, lun int32, cachingMode compute.CachingTypes) error { func (az *Cloud) AttachDisk(diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
vm, exists, err := az.getVirtualMachine(vmName) vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil { if err != nil {
return err return err
} else if !exists { } else if !exists {
@ -58,6 +59,7 @@ func (az *Cloud) AttachDisk(diskName, diskURI, vmName string, lun int32, caching
}, },
}, },
} }
vmName := mapNodeNameToVMName(nodeName)
_, err = az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) _, err = az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
if err != nil { if err != nil {
glog.Errorf("azure attach failed, err: %v", err) glog.Errorf("azure attach failed, err: %v", err)
@ -65,7 +67,7 @@ func (az *Cloud) AttachDisk(diskName, diskURI, vmName string, lun int32, caching
if strings.Contains(detail, "Code=\"AcquireDiskLeaseFailed\"") { if strings.Contains(detail, "Code=\"AcquireDiskLeaseFailed\"") {
// if lease cannot be acquired, immediately detach the disk and return the original error // if lease cannot be acquired, immediately detach the disk and return the original error
glog.Infof("failed to acquire disk lease, try detach") glog.Infof("failed to acquire disk lease, try detach")
az.DetachDiskByName(diskName, diskURI, vmName) az.DetachDiskByName(diskName, diskURI, nodeName)
} }
} else { } else {
glog.V(4).Infof("azure attach succeeded") glog.V(4).Infof("azure attach succeeded")
@ -75,11 +77,11 @@ func (az *Cloud) AttachDisk(diskName, diskURI, vmName string, lun int32, caching
// DetachDiskByName detaches a vhd from host // DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI // the vhd can be identified by diskName or diskURI
func (az *Cloud) DetachDiskByName(diskName, diskURI, vmName string) error { func (az *Cloud) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
vm, exists, err := az.getVirtualMachine(vmName) vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil || !exists { if err != nil || !exists {
// if host doesn't exist, no need to detach // if host doesn't exist, no need to detach
glog.Warningf("cannot find node %s, skip detaching disk %s", vmName, diskName) glog.Warningf("cannot find node %s, skip detaching disk %s", nodeName, diskName)
return nil return nil
} }
@ -100,6 +102,7 @@ func (az *Cloud) DetachDiskByName(diskName, diskURI, vmName string) error {
}, },
}, },
} }
vmName := mapNodeNameToVMName(nodeName)
_, err = az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) _, err = az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
if err != nil { if err != nil {
glog.Errorf("azure disk detach failed, err: %v", err) glog.Errorf("azure disk detach failed, err: %v", err)
@ -110,8 +113,8 @@ func (az *Cloud) DetachDiskByName(diskName, diskURI, vmName string) error {
} }
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI // GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (az *Cloud) GetDiskLun(diskName, diskURI, vmName string) (int32, error) { func (az *Cloud) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
vm, exists, err := az.getVirtualMachine(vmName) vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil { if err != nil {
return -1, err return -1, err
} else if !exists { } else if !exists {
@ -130,8 +133,8 @@ func (az *Cloud) GetDiskLun(diskName, diskURI, vmName string) (int32, error) {
// GetNextDiskLun searches all vhd attachment on the host and find unused lun // GetNextDiskLun searches all vhd attachment on the host and find unused lun
// return -1 if all luns are used // return -1 if all luns are used
func (az *Cloud) GetNextDiskLun(vmName string) (int32, error) { func (az *Cloud) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
vm, exists, err := az.getVirtualMachine(vmName) vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil { if err != nil {
return -1, err return -1, err
} else if !exists { } else if !exists {
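
The disk operations in this file keep the strongly typed nodeName until the Azure SDK boundary and only then convert it with mapNodeNameToVMName (its definition is not part of these hunks; it is assumed below to be a plain string cast like the other helpers in this commit). A hedged, self-contained sketch of that boundary pattern, with a stand-in for the VirtualMachinesClient call:

package main

import "fmt"

// NodeName stands in for k8s.io/kubernetes/pkg/types.NodeName.
type NodeName string

// mapNodeNameToVMName converts a Kubernetes node name to an Azure VM name.
// Assumption: a simple string cast, like the other helpers in this commit.
func mapNodeNameToVMName(nodeName NodeName) string {
	return string(nodeName)
}

// createOrUpdateVM is a stand-in for az.VirtualMachinesClient.CreateOrUpdate;
// it only exists to show where the typed name is finally converted.
func createOrUpdateVM(resourceGroup, vmName string) error {
	fmt.Printf("CreateOrUpdate(%s, %s)\n", resourceGroup, vmName)
	return nil
}

func attachDisk(nodeName NodeName) error {
	// ... look up the VM and build newVM with the extra data disk ...
	vmName := mapNodeNameToVMName(nodeName) // convert only at the SDK boundary
	return createOrUpdateVM("my-resource-group", vmName)
}

func main() {
	_ = attachDisk(NodeName("aks-node-0"))
}

The point of the pattern is that everything inside the cloud provider speaks NodeName, and the untyped string only appears in the call that actually names an Azure resource.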

View File

@ -25,6 +25,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/network"
"k8s.io/kubernetes/pkg/types"
) )
const ( const (
@ -202,8 +203,8 @@ outer:
return -1, fmt.Errorf("SecurityGroup priorities are exhausted") return -1, fmt.Errorf("SecurityGroup priorities are exhausted")
} }
func (az *Cloud) getIPForMachine(machineName string) (string, error) { func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) {
machine, exists, err := az.getVirtualMachine(machineName) machine, exists, err := az.getVirtualMachine(nodeName)
if !exists { if !exists {
return "", cloudprovider.InstanceNotFound return "", cloudprovider.InstanceNotFound
} }

View File

@ -22,6 +22,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest"
"k8s.io/kubernetes/pkg/types"
) )
// checkExistsFromError inspects an error and returns a true if err is nil, // checkExistsFromError inspects an error and returns a true if err is nil,
@ -38,10 +39,11 @@ func checkResourceExistsFromError(err error) (bool, error) {
return false, v return false, v
} }
func (az *Cloud) getVirtualMachine(machineName string) (vm compute.VirtualMachine, exists bool, err error) { func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, exists bool, err error) {
var realErr error var realErr error
vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, machineName, "") vmName := string(nodeName)
vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "")
exists, realErr = checkResourceExistsFromError(err) exists, realErr = checkResourceExistsFromError(err)
if realErr != nil { if realErr != nil {

View File

@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
) )
const ProviderName = "fake" const ProviderName = "fake"
@ -49,9 +50,9 @@ type FakeCloud struct {
Err error Err error
Calls []string Calls []string
Addresses []api.NodeAddress Addresses []api.NodeAddress
ExtID map[string]string ExtID map[types.NodeName]string
InstanceTypes map[string]string InstanceTypes map[types.NodeName]string
Machines []string Machines []types.NodeName
NodeResources *api.NodeResources NodeResources *api.NodeResources
ClusterList []string ClusterList []string
MasterName string MasterName string
@ -173,13 +174,13 @@ func (f *FakeCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
} }
// Implementation of Instances.CurrentNodeName // Implementation of Instances.CurrentNodeName
func (f *FakeCloud) CurrentNodeName(hostname string) (string, error) { func (f *FakeCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
return hostname, nil return types.NodeName(hostname), nil
} }
// NodeAddresses is a test-spy implementation of Instances.NodeAddresses. // NodeAddresses is a test-spy implementation of Instances.NodeAddresses.
// It adds an entry "node-addresses" into the internal method call record. // It adds an entry "node-addresses" into the internal method call record.
func (f *FakeCloud) NodeAddresses(instance string) ([]api.NodeAddress, error) { func (f *FakeCloud) NodeAddresses(instance types.NodeName) ([]api.NodeAddress, error) {
f.addCall("node-addresses") f.addCall("node-addresses")
return f.Addresses, f.Err return f.Addresses, f.Err
} }
@ -187,30 +188,30 @@ func (f *FakeCloud) NodeAddresses(instance string) ([]api.NodeAddress, error) {
// ExternalID is a test-spy implementation of Instances.ExternalID. // ExternalID is a test-spy implementation of Instances.ExternalID.
// It adds an entry "external-id" into the internal method call record. // It adds an entry "external-id" into the internal method call record.
// It returns an external id to the mapped instance name, if not found, it will return "ext-{instance}" // It returns an external id to the mapped instance name, if not found, it will return "ext-{instance}"
func (f *FakeCloud) ExternalID(instance string) (string, error) { func (f *FakeCloud) ExternalID(nodeName types.NodeName) (string, error) {
f.addCall("external-id") f.addCall("external-id")
return f.ExtID[instance], f.Err return f.ExtID[nodeName], f.Err
} }
// InstanceID returns the cloud provider ID of the specified instance. // InstanceID returns the cloud provider ID of the node with the specified Name.
func (f *FakeCloud) InstanceID(instance string) (string, error) { func (f *FakeCloud) InstanceID(nodeName types.NodeName) (string, error) {
f.addCall("instance-id") f.addCall("instance-id")
return f.ExtID[instance], nil return f.ExtID[nodeName], nil
} }
// InstanceType returns the type of the specified instance. // InstanceType returns the type of the specified instance.
func (f *FakeCloud) InstanceType(instance string) (string, error) { func (f *FakeCloud) InstanceType(instance types.NodeName) (string, error) {
f.addCall("instance-type") f.addCall("instance-type")
return f.InstanceTypes[instance], nil return f.InstanceTypes[instance], nil
} }
// List is a test-spy implementation of Instances.List. // List is a test-spy implementation of Instances.List.
// It adds an entry "list" into the internal method call record. // It adds an entry "list" into the internal method call record.
func (f *FakeCloud) List(filter string) ([]string, error) { func (f *FakeCloud) List(filter string) ([]types.NodeName, error) {
f.addCall("list") f.addCall("list")
result := []string{} result := []types.NodeName{}
for _, machine := range f.Machines { for _, machine := range f.Machines {
if match, _ := regexp.MatchString(filter, machine); match { if match, _ := regexp.MatchString(filter, string(machine)); match {
result = append(result, machine) result = append(result, machine)
} }
} }
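
With Machines now a []types.NodeName, FakeCloud.List has to cast each entry back to a string before the regexp match, as shown above. A self-contained sketch of that filter loop (a local NodeName alias stands in for the real type; the MatchString error is ignored, as in the fake):

package main

import (
	"fmt"
	"regexp"
)

// NodeName stands in for k8s.io/kubernetes/pkg/types.NodeName.
type NodeName string

// listMachines filters a slice of typed node names with a regexp,
// mirroring the FakeCloud.List change in this commit.
func listMachines(machines []NodeName, filter string) []NodeName {
	result := []NodeName{}
	for _, machine := range machines {
		if match, _ := regexp.MatchString(filter, string(machine)); match {
			result = append(result, machine)
		}
	}
	return result
}

func main() {
	machines := []NodeName{"node-a", "node-b", "worker-1"}
	fmt.Println(listMachines(machines, "^node-")) // [node-a node-b]
}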

View File

@ -122,16 +122,16 @@ const (
// Disks is interface for manipulation with GCE PDs. // Disks is interface for manipulation with GCE PDs.
type Disks interface { type Disks interface {
// AttachDisk attaches given disk to given instance. Current instance // AttachDisk attaches given disk to the node with the specified NodeName.
// is used when instanceID is empty string. // Current instance is used when nodeName is empty string.
AttachDisk(diskName, instanceID string, readOnly bool) error AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error
// DetachDisk detaches given disk to given instance. Current instance // DetachDisk detaches given disk from the node with the specified NodeName.
// is used when instanceID is empty string. // Current instance is used when nodeName is empty string.
DetachDisk(devicePath, instanceID string) error DetachDisk(devicePath string, nodeName types.NodeName) error
// DiskIsAttached checks if a disk is attached to the given node. // DiskIsAttached checks if a disk is attached to the node with the specified NodeName.
DiskIsAttached(diskName, instanceID string) (bool, error) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error)
// CreateDisk creates a new PD with given properties. Tags are serialized // CreateDisk creates a new PD with given properties. Tags are serialized
// as JSON into Description field. // as JSON into Description field.
@ -2095,8 +2095,8 @@ func canonicalizeInstanceName(name string) string {
} }
// Implementation of Instances.CurrentNodeName // Implementation of Instances.CurrentNodeName
func (gce *GCECloud) CurrentNodeName(hostname string) (string, error) { func (gce *GCECloud) CurrentNodeName(hostname string) (types.NodeName, error) {
return hostname, nil return types.NodeName(hostname), nil
} }
func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error { func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
@ -2145,7 +2145,7 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error
} }
// NodeAddresses is an implementation of Instances.NodeAddresses. // NodeAddresses is an implementation of Instances.NodeAddresses.
func (gce *GCECloud) NodeAddresses(_ string) ([]api.NodeAddress, error) { func (gce *GCECloud) NodeAddresses(_ types.NodeName) ([]api.NodeAddress, error) {
internalIP, err := metadata.Get("instance/network-interfaces/0/ip") internalIP, err := metadata.Get("instance/network-interfaces/0/ip")
if err != nil { if err != nil {
return nil, fmt.Errorf("couldn't get internal IP: %v", err) return nil, fmt.Errorf("couldn't get internal IP: %v", err)
@ -2172,11 +2172,23 @@ func (gce *GCECloud) isCurrentInstance(instanceID string) bool {
return currentInstanceID == canonicalizeInstanceName(instanceID) return currentInstanceID == canonicalizeInstanceName(instanceID)
} }
// ExternalID returns the cloud provider ID of the specified instance (deprecated). // mapNodeNameToInstanceName maps a k8s NodeName to a GCE Instance Name
func (gce *GCECloud) ExternalID(instance string) (string, error) { // This is a simple string cast.
func mapNodeNameToInstanceName(nodeName types.NodeName) string {
return string(nodeName)
}
// mapInstanceToNodeName maps a GCE Instance to a k8s NodeName
func mapInstanceToNodeName(instance *compute.Instance) types.NodeName {
return types.NodeName(instance.Name)
}
// ExternalID returns the cloud provider ID of the node with the specified NodeName (deprecated).
func (gce *GCECloud) ExternalID(nodeName types.NodeName) (string, error) {
instanceName := mapNodeNameToInstanceName(nodeName)
if gce.useMetadataServer { if gce.useMetadataServer {
// Use metadata, if possible, to fetch ID. See issue #12000 // Use metadata, if possible, to fetch ID. See issue #12000
if gce.isCurrentInstance(instance) { if gce.isCurrentInstance(instanceName) {
externalInstanceID, err := getCurrentExternalIDViaMetadata() externalInstanceID, err := getCurrentExternalIDViaMetadata()
if err == nil { if err == nil {
return externalInstanceID, nil return externalInstanceID, nil
@ -2185,15 +2197,16 @@ func (gce *GCECloud) ExternalID(instance string) (string, error) {
} }
// Fallback to GCE API call if metadata server fails to retrieve ID // Fallback to GCE API call if metadata server fails to retrieve ID
inst, err := gce.getInstanceByName(instance) inst, err := gce.getInstanceByName(instanceName)
if err != nil { if err != nil {
return "", err return "", err
} }
return strconv.FormatUint(inst.ID, 10), nil return strconv.FormatUint(inst.ID, 10), nil
} }
// InstanceID returns the cloud provider ID of the specified instance. // InstanceID returns the cloud provider ID of the node with the specified NodeName.
func (gce *GCECloud) InstanceID(instanceName string) (string, error) { func (gce *GCECloud) InstanceID(nodeName types.NodeName) (string, error) {
instanceName := mapNodeNameToInstanceName(nodeName)
if gce.useMetadataServer { if gce.useMetadataServer {
// Use metadata, if possible, to fetch ID. See issue #12000 // Use metadata, if possible, to fetch ID. See issue #12000
if gce.isCurrentInstance(instanceName) { if gce.isCurrentInstance(instanceName) {
@ -2210,8 +2223,9 @@ func (gce *GCECloud) InstanceID(instanceName string) (string, error) {
return gce.projectID + "/" + instance.Zone + "/" + instance.Name, nil return gce.projectID + "/" + instance.Zone + "/" + instance.Name, nil
} }
// InstanceType returns the type of the specified instance. // InstanceType returns the type of the node with the specified NodeName.
func (gce *GCECloud) InstanceType(instanceName string) (string, error) { func (gce *GCECloud) InstanceType(nodeName types.NodeName) (string, error) {
instanceName := mapNodeNameToInstanceName(nodeName)
if gce.useMetadataServer { if gce.useMetadataServer {
// Use metadata, if possible, to fetch ID. See issue #12000 // Use metadata, if possible, to fetch ID. See issue #12000
if gce.isCurrentInstance(instanceName) { if gce.isCurrentInstance(instanceName) {
@ -2229,8 +2243,8 @@ func (gce *GCECloud) InstanceType(instanceName string) (string, error) {
} }
// List is an implementation of Instances.List. // List is an implementation of Instances.List.
func (gce *GCECloud) List(filter string) ([]string, error) { func (gce *GCECloud) List(filter string) ([]types.NodeName, error) {
var instances []string var instances []types.NodeName
// TODO: Parallelize, although O(zones) so not too bad (N <= 3 typically) // TODO: Parallelize, although O(zones) so not too bad (N <= 3 typically)
for _, zone := range gce.managedZones { for _, zone := range gce.managedZones {
pageToken := "" pageToken := ""
@ -2249,7 +2263,7 @@ func (gce *GCECloud) List(filter string) ([]string, error) {
} }
pageToken = res.NextPageToken pageToken = res.NextPageToken
for _, instance := range res.Items { for _, instance := range res.Items {
instances = append(instances, instance.Name) instances = append(instances, mapInstanceToNodeName(instance))
} }
} }
if page >= maxPages { if page >= maxPages {
@ -2349,7 +2363,9 @@ func (gce *GCECloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, err
} }
target := path.Base(r.NextHopInstance) target := path.Base(r.NextHopInstance)
routes = append(routes, &cloudprovider.Route{Name: r.Name, TargetInstance: target, DestinationCIDR: r.DestRange}) // TODO: Should we lastComponent(target) this?
targetNodeName := types.NodeName(target) // NodeName == Instance Name on GCE
routes = append(routes, &cloudprovider.Route{Name: r.Name, TargetNode: targetNodeName, DestinationCIDR: r.DestRange})
} }
} }
if page >= maxPages { if page >= maxPages {
@ -2365,7 +2381,8 @@ func gceNetworkURL(project, network string) string {
func (gce *GCECloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error { func (gce *GCECloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
routeName := truncateClusterName(clusterName) + "-" + nameHint routeName := truncateClusterName(clusterName) + "-" + nameHint
targetInstance, err := gce.getInstanceByName(route.TargetInstance) instanceName := mapNodeNameToInstanceName(route.TargetNode)
targetInstance, err := gce.getInstanceByName(instanceName)
if err != nil { if err != nil {
return err return err
} }
@ -2545,10 +2562,11 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st
return labels, nil return labels, nil
} }
func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) error { func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
instance, err := gce.getInstanceByName(instanceID) instanceName := mapNodeNameToInstanceName(nodeName)
instance, err := gce.getInstanceByName(instanceName)
if err != nil { if err != nil {
return fmt.Errorf("error getting instance %q", instanceID) return fmt.Errorf("error getting instance %q", instanceName)
} }
disk, err := gce.getDiskByName(diskName, instance.Zone) disk, err := gce.getDiskByName(diskName, instance.Zone)
if err != nil { if err != nil {
@ -2560,7 +2578,7 @@ func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) erro
} }
attachedDisk := gce.convertDiskToAttachedDisk(disk, readWrite) attachedDisk := gce.convertDiskToAttachedDisk(disk, readWrite)
attachOp, err := gce.service.Instances.AttachDisk(gce.projectID, disk.Zone, instanceID, attachedDisk).Do() attachOp, err := gce.service.Instances.AttachDisk(gce.projectID, disk.Zone, instanceName, attachedDisk).Do()
if err != nil { if err != nil {
return err return err
} }
@ -2568,19 +2586,20 @@ func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) erro
return gce.waitForZoneOp(attachOp, disk.Zone) return gce.waitForZoneOp(attachOp, disk.Zone)
} }
func (gce *GCECloud) DetachDisk(devicePath, instanceID string) error { func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) error {
inst, err := gce.getInstanceByName(instanceID) instanceName := mapNodeNameToInstanceName(nodeName)
inst, err := gce.getInstanceByName(instanceName)
if err != nil { if err != nil {
if err == cloudprovider.InstanceNotFound { if err == cloudprovider.InstanceNotFound {
// If instance no longer exists, safe to assume volume is not attached. // If instance no longer exists, safe to assume volume is not attached.
glog.Warningf( glog.Warningf(
"Instance %q does not exist. DetachDisk will assume PD %q is not attached to it.", "Instance %q does not exist. DetachDisk will assume PD %q is not attached to it.",
instanceID, instanceName,
devicePath) devicePath)
return nil return nil
} }
return fmt.Errorf("error getting instance %q", instanceID) return fmt.Errorf("error getting instance %q", instanceName)
} }
detachOp, err := gce.service.Instances.DetachDisk(gce.projectID, inst.Zone, inst.Name, devicePath).Do() detachOp, err := gce.service.Instances.DetachDisk(gce.projectID, inst.Zone, inst.Name, devicePath).Do()
@ -2591,14 +2610,15 @@ func (gce *GCECloud) DetachDisk(devicePath, instanceID string) error {
return gce.waitForZoneOp(detachOp, inst.Zone) return gce.waitForZoneOp(detachOp, inst.Zone)
} }
func (gce *GCECloud) DiskIsAttached(diskName, instanceID string) (bool, error) { func (gce *GCECloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
instance, err := gce.getInstanceByName(instanceID) instanceName := mapNodeNameToInstanceName(nodeName)
instance, err := gce.getInstanceByName(instanceName)
if err != nil { if err != nil {
if err == cloudprovider.InstanceNotFound { if err == cloudprovider.InstanceNotFound {
// If instance no longer exists, safe to assume volume is not attached. // If instance no longer exists, safe to assume volume is not attached.
glog.Warningf( glog.Warningf(
"Instance %q does not exist. DiskIsAttached will assume PD %q is not attached to it.", "Instance %q does not exist. DiskIsAttached will assume PD %q is not attached to it.",
instanceID, instanceName,
diskName) diskName)
return false, nil return false, nil
} }
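
Earlier in this file's diff, ListRoutes derives the route target from path.Base(r.NextHopInstance), which is a full GCE resource URL, and then treats the last path component as the NodeName (the TODO asks whether taking the last component is the right thing to do). A hedged sketch of that derivation with a local NodeName alias; the URL below is illustrative, not taken from the commit:

package main

import (
	"fmt"
	"path"
)

// NodeName stands in for k8s.io/kubernetes/pkg/types.NodeName.
type NodeName string

// nodeNameFromNextHop takes the last path component of a GCE NextHopInstance
// URL and treats it as the node name, mirroring ListRoutes in this commit
// (NodeName == instance name on GCE).
func nodeNameFromNextHop(nextHopInstance string) NodeName {
	return NodeName(path.Base(nextHopInstance))
}

func main() {
	// Hypothetical NextHopInstance URL, for illustration only.
	url := "https://www.googleapis.com/compute/v1/projects/p/zones/us-central1-a/instances/gke-node-1"
	fmt.Println(nodeNameFromNextHop(url)) // gke-node-1
}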

View File

@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
) )
const ( const (
@ -89,8 +90,8 @@ func newMesosCloud(configReader io.Reader) (*MesosCloud, error) {
} }
// Implementation of Instances.CurrentNodeName // Implementation of Instances.CurrentNodeName
func (c *MesosCloud) CurrentNodeName(hostname string) (string, error) { func (c *MesosCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
return hostname, nil return types.NodeName(hostname), nil
} }
func (c *MesosCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error { func (c *MesosCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
@ -190,8 +191,15 @@ func ipAddress(name string) (net.IP, error) {
return ipaddr, nil return ipaddr, nil
} }
// ExternalID returns the cloud provider ID of the specified instance (deprecated). // mapNodeNameToHostname maps a k8s NodeName to a Mesos hostname.
func (c *MesosCloud) ExternalID(instance string) (string, error) { // This is a simple string cast
func mapNodeNameToHostname(nodeName types.NodeName) string {
return string(nodeName)
}
// ExternalID returns the cloud provider ID of the instance with the specified nodeName (deprecated).
func (c *MesosCloud) ExternalID(nodeName types.NodeName) (string, error) {
hostname := mapNodeNameToHostname(nodeName)
//TODO(jdef) use a timeout here? 15s? //TODO(jdef) use a timeout here? 15s?
ctx, cancel := context.WithCancel(context.TODO()) ctx, cancel := context.WithCancel(context.TODO())
defer cancel() defer cancel()
@ -201,7 +209,7 @@ func (c *MesosCloud) ExternalID(instance string) (string, error) {
return "", err return "", err
} }
node := nodes[instance] node := nodes[hostname]
if node == nil { if node == nil {
return "", cloudprovider.InstanceNotFound return "", cloudprovider.InstanceNotFound
} }
@ -213,13 +221,13 @@ func (c *MesosCloud) ExternalID(instance string) (string, error) {
return ip.String(), nil return ip.String(), nil
} }
// InstanceID returns the cloud provider ID of the specified instance. // InstanceID returns the cloud provider ID of the instance with the specified nodeName.
func (c *MesosCloud) InstanceID(name string) (string, error) { func (c *MesosCloud) InstanceID(nodeName types.NodeName) (string, error) {
return "", nil return "", nil
} }
// InstanceType returns the type of the specified instance. // InstanceType returns the type of the instance with the specified nodeName.
func (c *MesosCloud) InstanceType(name string) (string, error) { func (c *MesosCloud) InstanceType(nodeName types.NodeName) (string, error) {
return "", nil return "", nil
} }
@ -241,7 +249,7 @@ func (c *MesosCloud) listNodes() (map[string]*slaveNode, error) {
// List lists instances that match 'filter' which is a regular expression // List lists instances that match 'filter' which is a regular expression
// which must match the entire instance name (fqdn). // which must match the entire instance name (fqdn).
func (c *MesosCloud) List(filter string) ([]string, error) { func (c *MesosCloud) List(filter string) ([]types.NodeName, error) {
nodes, err := c.listNodes() nodes, err := c.listNodes()
if err != nil { if err != nil {
return nil, err return nil, err
@ -250,13 +258,13 @@ func (c *MesosCloud) List(filter string) ([]string, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
addr := []string{} names := []types.NodeName{}
for _, node := range nodes { for _, node := range nodes {
if filterRegex.MatchString(node.hostname) { if filterRegex.MatchString(node.hostname) {
addr = append(addr, node.hostname) names = append(names, types.NodeName(node.hostname))
} }
} }
return addr, nil return names, nil
} }
// ListWithKubelet list those instance which have no running kubelet, i.e. the // ListWithKubelet list those instance which have no running kubelet, i.e. the
@ -275,8 +283,9 @@ func (c *MesosCloud) ListWithoutKubelet() ([]string, error) {
return addr, nil return addr, nil
} }
// NodeAddresses returns the addresses of the specified instance. // NodeAddresses returns the addresses of the instance with the specified nodeName.
func (c *MesosCloud) NodeAddresses(name string) ([]api.NodeAddress, error) { func (c *MesosCloud) NodeAddresses(nodeName types.NodeName) ([]api.NodeAddress, error) {
name := mapNodeNameToHostname(nodeName)
ip, err := ipAddress(name) ip, err := ipAddress(name)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -25,6 +25,7 @@ import (
log "github.com/golang/glog" log "github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
) )
func TestIPAddress(t *testing.T) { func TestIPAddress(t *testing.T) {
@ -268,7 +269,7 @@ func Test_ExternalID(t *testing.T) {
t.Fatalf("ExternalID did not return InstanceNotFound on an unknown instance") t.Fatalf("ExternalID did not return InstanceNotFound on an unknown instance")
} }
slaveName := "mesos3.internal.company.com" slaveName := types.NodeName("mesos3.internal.company.com")
id, err := mesosCloud.ExternalID(slaveName) id, err := mesosCloud.ExternalID(slaveName)
if id != "" { if id != "" {
t.Fatalf("ExternalID should not be able to resolve %q", slaveName) t.Fatalf("ExternalID should not be able to resolve %q", slaveName)

View File

@ -37,6 +37,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
) )
const ProviderName = "openstack" const ProviderName = "openstack"
@ -237,9 +238,20 @@ func newOpenStack(cfg Config) (*OpenStack, error) {
return &os, nil return &os, nil
} }
func getServerByName(client *gophercloud.ServiceClient, name string) (*servers.Server, error) { // mapNodeNameToServerName maps a k8s NodeName to an OpenStack Server Name
// This is a simple string cast.
func mapNodeNameToServerName(nodeName types.NodeName) string {
return string(nodeName)
}
// mapServerToNodeName maps an OpenStack Server to a k8s NodeName
func mapServerToNodeName(server *servers.Server) types.NodeName {
return types.NodeName(server.Name)
}
func getServerByName(client *gophercloud.ServiceClient, name types.NodeName) (*servers.Server, error) {
opts := servers.ListOpts{ opts := servers.ListOpts{
Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(name)), Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))),
Status: "ACTIVE", Status: "ACTIVE",
} }
pager := servers.List(client, opts) pager := servers.List(client, opts)
@ -270,7 +282,7 @@ func getServerByName(client *gophercloud.ServiceClient, name string) (*servers.S
return &serverList[0], nil return &serverList[0], nil
} }
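
getServerByName now takes a types.NodeName and builds the Nova name filter from it, escaping and anchoring the server name so only an exact match is returned. A standalone sketch of just the pattern construction (local NodeName alias; the gophercloud call itself is omitted):

package main

import (
	"fmt"
	"regexp"
)

// NodeName stands in for k8s.io/kubernetes/pkg/types.NodeName.
type NodeName string

// mapNodeNameToServerName converts a node name to an OpenStack server name
// (a plain string cast in this commit).
func mapNodeNameToServerName(nodeName NodeName) string {
	return string(nodeName)
}

// exactNameFilter builds the anchored, escaped pattern used for
// servers.ListOpts.Name, so the dot in the name cannot match any character.
func exactNameFilter(nodeName NodeName) string {
	return fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(nodeName)))
}

func main() {
	fmt.Println(exactNameFilter(NodeName("node-1.example"))) // ^node-1\.example$
}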
func getAddressesByName(client *gophercloud.ServiceClient, name string) ([]api.NodeAddress, error) { func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName) ([]api.NodeAddress, error) {
srv, err := getServerByName(client, name) srv, err := getServerByName(client, name)
if err != nil { if err != nil {
return nil, err return nil, err
@ -339,7 +351,7 @@ func getAddressesByName(client *gophercloud.ServiceClient, name string) ([]api.N
return addrs, nil return addrs, nil
} }
func getAddressByName(client *gophercloud.ServiceClient, name string) (string, error) { func getAddressByName(client *gophercloud.ServiceClient, name types.NodeName) (string, error) {
addrs, err := getAddressesByName(client, name) addrs, err := getAddressesByName(client, name)
if err != nil { if err != nil {
return "", err return "", err

View File

@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
) )
type Instances struct { type Instances struct {
@ -81,7 +82,7 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
return &Instances{compute, flavor_to_resource}, true return &Instances{compute, flavor_to_resource}, true
} }
func (i *Instances) List(name_filter string) ([]string, error) { func (i *Instances) List(name_filter string) ([]types.NodeName, error) {
glog.V(4).Infof("openstack List(%v) called", name_filter) glog.V(4).Infof("openstack List(%v) called", name_filter)
opts := servers.ListOpts{ opts := servers.ListOpts{
@ -90,14 +91,14 @@ func (i *Instances) List(name_filter string) ([]string, error) {
} }
pager := servers.List(i.compute, opts) pager := servers.List(i.compute, opts)
ret := make([]string, 0) ret := make([]types.NodeName, 0)
err := pager.EachPage(func(page pagination.Page) (bool, error) { err := pager.EachPage(func(page pagination.Page) (bool, error) {
sList, err := servers.ExtractServers(page) sList, err := servers.ExtractServers(page)
if err != nil { if err != nil {
return false, err return false, err
} }
for _, server := range sList { for i := range sList {
ret = append(ret, server.Name) ret = append(ret, mapServerToNodeName(&sList[i]))
} }
return true, nil return true, nil
}) })
@ -112,15 +113,15 @@ func (i *Instances) List(name_filter string) ([]string, error) {
} }
// Implementation of Instances.CurrentNodeName // Implementation of Instances.CurrentNodeName
func (i *Instances) CurrentNodeName(hostname string) (string, error) { func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) {
return hostname, nil return types.NodeName(hostname), nil
} }
func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error { func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("unimplemented") return errors.New("unimplemented")
} }
func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) { func (i *Instances) NodeAddresses(name types.NodeName) ([]api.NodeAddress, error) {
glog.V(4).Infof("NodeAddresses(%v) called", name) glog.V(4).Infof("NodeAddresses(%v) called", name)
addrs, err := getAddressesByName(i.compute, name) addrs, err := getAddressesByName(i.compute, name)
@ -133,7 +134,7 @@ func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) {
} }
// ExternalID returns the cloud provider ID of the specified instance (deprecated). // ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (i *Instances) ExternalID(name string) (string, error) { func (i *Instances) ExternalID(name types.NodeName) (string, error) {
srv, err := getServerByName(i.compute, name) srv, err := getServerByName(i.compute, name)
if err != nil { if err != nil {
if err == ErrNotFound { if err == ErrNotFound {
@ -150,7 +151,7 @@ func (os *OpenStack) InstanceID() (string, error) {
} }
// InstanceID returns the cloud provider ID of the specified instance. // InstanceID returns the cloud provider ID of the specified instance.
func (i *Instances) InstanceID(name string) (string, error) { func (i *Instances) InstanceID(name types.NodeName) (string, error) {
srv, err := getServerByName(i.compute, name) srv, err := getServerByName(i.compute, name)
if err != nil { if err != nil {
return "", err return "", err
@ -161,6 +162,6 @@ func (i *Instances) InstanceID(name string) (string, error) {
} }
// InstanceType returns the type of the specified instance. // InstanceType returns the type of the specified instance.
func (i *Instances) InstanceType(name string) (string, error) { func (i *Instances) InstanceType(name types.NodeName) (string, error) {
return "", nil return "", nil
} }

View File

@ -39,6 +39,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/service" "k8s.io/kubernetes/pkg/api/service"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
) )
// Note: when creating a new Loadbalancer (VM), it can take some time before it is ready for use, // Note: when creating a new Loadbalancer (VM), it can take some time before it is ready for use,
@ -303,8 +304,8 @@ func (lbaas *LbaasV2) GetLoadBalancer(clusterName string, service *api.Service)
// a list of regions (from config) and query/create loadbalancers in // a list of regions (from config) and query/create loadbalancers in
// each region. // each region.
func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Service, hosts []string) (*api.LoadBalancerStatus, error) { func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Service, nodeNames []string) (*api.LoadBalancerStatus, error) {
glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, hosts, apiService.Annotations) glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodeNames, apiService.Annotations)
ports := apiService.Spec.Ports ports := apiService.Spec.Ports
if len(ports) == 0 { if len(ports) == 0 {
@ -410,8 +411,8 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Ser
waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
for _, host := range hosts { for _, nodeName := range nodeNames {
addr, err := getAddressByName(lbaas.compute, host) addr, err := getAddressByName(lbaas.compute, types.NodeName(nodeName))
if err != nil { if err != nil {
// cleanup what was created so far // cleanup what was created so far
_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService) _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
@ -478,9 +479,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Ser
return status, nil return status, nil
} }
func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *api.Service, hosts []string) error { func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *api.Service, nodeNames []string) error {
loadBalancerName := cloudprovider.GetLoadBalancerName(service) loadBalancerName := cloudprovider.GetLoadBalancerName(service)
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, hosts) glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodeNames)
ports := service.Spec.Ports ports := service.Spec.Ports
if len(ports) == 0 { if len(ports) == 0 {
@ -536,8 +537,8 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *api.Servic
// Compose Set of member (addresses) that _should_ exist // Compose Set of member (addresses) that _should_ exist
addrs := map[string]empty{} addrs := map[string]empty{}
for _, host := range hosts { for _, nodeName := range nodeNames {
addr, err := getAddressByName(lbaas.compute, host) addr, err := getAddressByName(lbaas.compute, types.NodeName(nodeName))
if err != nil { if err != nil {
return err return err
} }
@ -765,8 +766,8 @@ func (lb *LbaasV1) GetLoadBalancer(clusterName string, service *api.Service) (*a
// a list of regions (from config) and query/create loadbalancers in // a list of regions (from config) and query/create loadbalancers in
// each region. // each region.
func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *api.Service, hosts []string) (*api.LoadBalancerStatus, error) { func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *api.Service, nodeNames []string) (*api.LoadBalancerStatus, error) {
glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, hosts, apiService.Annotations) glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodeNames, apiService.Annotations)
ports := apiService.Spec.Ports ports := apiService.Spec.Ports
if len(ports) > 1 { if len(ports) > 1 {
@ -831,8 +832,8 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *api.Servic
return nil, err return nil, err
} }
for _, host := range hosts { for _, nodeName := range nodeNames {
addr, err := getAddressByName(lb.compute, host) addr, err := getAddressByName(lb.compute, types.NodeName(nodeName))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -914,9 +915,9 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *api.Servic
} }
func (lb *LbaasV1) UpdateLoadBalancer(clusterName string, service *api.Service, hosts []string) error { func (lb *LbaasV1) UpdateLoadBalancer(clusterName string, service *api.Service, nodeNames []string) error {
loadBalancerName := cloudprovider.GetLoadBalancerName(service) loadBalancerName := cloudprovider.GetLoadBalancerName(service)
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, hosts) glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodeNames)
vip, err := getVipByName(lb.network, loadBalancerName) vip, err := getVipByName(lb.network, loadBalancerName)
if err != nil { if err != nil {
@ -925,8 +926,8 @@ func (lb *LbaasV1) UpdateLoadBalancer(clusterName string, service *api.Service,
// Set of member (addresses) that _should_ exist // Set of member (addresses) that _should_ exist
addrs := map[string]bool{} addrs := map[string]bool{}
for _, host := range hosts { for _, nodeName := range nodeNames {
addr, err := getAddressByName(lb.compute, host) addr, err := getAddressByName(lb.compute, types.NodeName(nodeName))
if err != nil { if err != nil {
return err return err
} }
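
The load balancer paths still receive node names as a []string from the cloudprovider interface; this commit only renames hosts to nodeNames and casts each element to types.NodeName at the point where the node's address is resolved. A rough sketch of that loop, with a fake getAddressByName standing in for the OpenStack helper of the same name:

package main

import "fmt"

// NodeName stands in for k8s.io/kubernetes/pkg/types.NodeName.
type NodeName string

// getAddressByName is a stand-in for the OpenStack helper; it fabricates
// an address so the sketch runs without a cloud.
func getAddressByName(nodeName NodeName) (string, error) {
	return "addr-of-" + string(nodeName), nil
}

// memberAddresses mirrors the loop in EnsureLoadBalancer/UpdateLoadBalancer:
// the caller hands over plain strings, and each one is cast to a NodeName
// only when the node's address is looked up.
func memberAddresses(nodeNames []string) (map[string]struct{}, error) {
	addrs := map[string]struct{}{}
	for _, nodeName := range nodeNames {
		addr, err := getAddressByName(NodeName(nodeName))
		if err != nil {
			return nil, err
		}
		addrs[addr] = struct{}{}
	}
	return addrs, nil
}

func main() {
	addrs, _ := memberAddresses([]string{"node-a", "node-b"})
	fmt.Println(len(addrs)) // 2
}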

View File

@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
) )
const ProviderName = "ovirt" const ProviderName = "ovirt"
@ -149,8 +150,9 @@ func (v *OVirtCloud) Routes() (cloudprovider.Routes, bool) {
return nil, false return nil, false
} }
// NodeAddresses returns the NodeAddresses of a particular machine instance // NodeAddresses returns the NodeAddresses of the instance with the specified nodeName.
func (v *OVirtCloud) NodeAddresses(name string) ([]api.NodeAddress, error) { func (v *OVirtCloud) NodeAddresses(nodeName types.NodeName) ([]api.NodeAddress, error) {
name := mapNodeNameToInstanceName(nodeName)
instance, err := v.fetchInstance(name) instance, err := v.fetchInstance(name)
if err != nil { if err != nil {
return nil, err return nil, err
@ -174,8 +176,15 @@ func (v *OVirtCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
return []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: address.String()}}, nil return []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: address.String()}}, nil
} }
// ExternalID returns the cloud provider ID of the specified instance (deprecated). // mapNodeNameToInstanceName maps from a k8s NodeName to an ovirt instance name (the hostname)
func (v *OVirtCloud) ExternalID(name string) (string, error) { // This is a simple string cast
func mapNodeNameToInstanceName(nodeName types.NodeName) string {
return string(nodeName)
}
// ExternalID returns the cloud provider ID of the node with the specified NodeName (deprecated).
func (v *OVirtCloud) ExternalID(nodeName types.NodeName) (string, error) {
name := mapNodeNameToInstanceName(nodeName)
instance, err := v.fetchInstance(name) instance, err := v.fetchInstance(name)
if err != nil { if err != nil {
return "", err return "", err
@ -183,8 +192,9 @@ func (v *OVirtCloud) ExternalID(name string) (string, error) {
return instance.UUID, nil return instance.UUID, nil
} }
// InstanceID returns the cloud provider ID of the specified instance. // InstanceID returns the cloud provider ID of the node with the specified NodeName.
func (v *OVirtCloud) InstanceID(name string) (string, error) { func (v *OVirtCloud) InstanceID(nodeName types.NodeName) (string, error) {
name := mapNodeNameToInstanceName(nodeName)
instance, err := v.fetchInstance(name) instance, err := v.fetchInstance(name)
if err != nil { if err != nil {
return "", err return "", err
@ -195,7 +205,7 @@ func (v *OVirtCloud) InstanceID(name string) (string, error) {
} }
// InstanceType returns the type of the specified instance. // InstanceType returns the type of the specified instance.
func (v *OVirtCloud) InstanceType(name string) (string, error) { func (v *OVirtCloud) InstanceType(name types.NodeName) (string, error) {
return "", nil return "", nil
} }
@ -274,17 +284,21 @@ func (m *OVirtInstanceMap) ListSortedNames() []string {
} }
// List enumerates the set of minion instances known by the cloud provider // List enumerates the set of minion instances known by the cloud provider
func (v *OVirtCloud) List(filter string) ([]string, error) { func (v *OVirtCloud) List(filter string) ([]types.NodeName, error) {
instances, err := v.fetchAllInstances() instances, err := v.fetchAllInstances()
if err != nil { if err != nil {
return nil, err return nil, err
} }
return instances.ListSortedNames(), nil var nodeNames []types.NodeName
for _, s := range instances.ListSortedNames() {
nodeNames = append(nodeNames, types.NodeName(s))
}
return nodeNames, nil
} }
// Implementation of Instances.CurrentNodeName // Implementation of Instances.CurrentNodeName
func (v *OVirtCloud) CurrentNodeName(hostname string) (string, error) { func (v *OVirtCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
return hostname, nil return types.NodeName(hostname), nil
} }
func (v *OVirtCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error { func (v *OVirtCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {

View File

@ -42,6 +42,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
) )
const ProviderName = "rackspace" const ProviderName = "rackspace"
@ -230,7 +231,7 @@ func (os *Rackspace) Instances() (cloudprovider.Instances, bool) {
return &Instances{compute}, true return &Instances{compute}, true
} }
func (i *Instances) List(name_filter string) ([]string, error) { func (i *Instances) List(name_filter string) ([]types.NodeName, error) {
glog.V(2).Infof("rackspace List(%v) called", name_filter) glog.V(2).Infof("rackspace List(%v) called", name_filter)
opts := osservers.ListOpts{ opts := osservers.ListOpts{
@ -239,14 +240,14 @@ func (i *Instances) List(name_filter string) ([]string, error) {
} }
pager := servers.List(i.compute, opts) pager := servers.List(i.compute, opts)
ret := make([]string, 0) ret := make([]types.NodeName, 0)
err := pager.EachPage(func(page pagination.Page) (bool, error) { err := pager.EachPage(func(page pagination.Page) (bool, error) {
sList, err := servers.ExtractServers(page) sList, err := servers.ExtractServers(page)
if err != nil { if err != nil {
return false, err return false, err
} }
for _, server := range sList { for i := range sList {
ret = append(ret, server.Name) ret = append(ret, mapServerToNodeName(&sList[i]))
} }
return true, nil return true, nil
}) })
@ -396,23 +397,35 @@ func getAddressByName(api *gophercloud.ServiceClient, name string) (string, erro
return getAddressByServer(srv) return getAddressByServer(srv)
} }
func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) { func (i *Instances) NodeAddresses(nodeName types.NodeName) ([]api.NodeAddress, error) {
glog.V(2).Infof("NodeAddresses(%v) called", name) glog.V(2).Infof("NodeAddresses(%v) called", nodeName)
serverName := mapNodeNameToServerName(nodeName)
ip, err := probeNodeAddress(i.compute, name) ip, err := probeNodeAddress(i.compute, serverName)
if err != nil { if err != nil {
return nil, err return nil, err
} }
glog.V(2).Infof("NodeAddresses(%v) => %v", name, ip) glog.V(2).Infof("NodeAddresses(%v) => %v", serverName, ip)
// net.ParseIP().String() is to maintain compatibility with the old code // net.ParseIP().String() is to maintain compatibility with the old code
return []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: net.ParseIP(ip).String()}}, nil return []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: net.ParseIP(ip).String()}}, nil
} }
// ExternalID returns the cloud provider ID of the specified instance (deprecated). // mapNodeNameToServerName maps from a k8s NodeName to a rackspace Server Name
func (i *Instances) ExternalID(name string) (string, error) { // This is a simple string cast.
return probeInstanceID(i.compute, name) func mapNodeNameToServerName(nodeName types.NodeName) string {
return string(nodeName)
}
// mapServerToNodeName maps a rackspace Server to a k8s NodeName
func mapServerToNodeName(s *osservers.Server) types.NodeName {
return types.NodeName(s.Name)
}
// ExternalID returns the cloud provider ID of the node with the specified Name (deprecated).
func (i *Instances) ExternalID(nodeName types.NodeName) (string, error) {
serverName := mapNodeNameToServerName(nodeName)
return probeInstanceID(i.compute, serverName)
} }
// InstanceID returns the cloud provider ID of the kubelet's instance. // InstanceID returns the cloud provider ID of the kubelet's instance.
@ -420,13 +433,14 @@ func (rs *Rackspace) InstanceID() (string, error) {
return readInstanceID() return readInstanceID()
} }
// InstanceID returns the cloud provider ID of the specified instance. // InstanceID returns the cloud provider ID of the node with the specified Name.
func (i *Instances) InstanceID(name string) (string, error) { func (i *Instances) InstanceID(nodeName types.NodeName) (string, error) {
return probeInstanceID(i.compute, name) serverName := mapNodeNameToServerName(nodeName)
return probeInstanceID(i.compute, serverName)
} }
// InstanceType returns the type of the specified instance. // InstanceType returns the type of the specified instance.
func (i *Instances) InstanceType(name string) (string, error) { func (i *Instances) InstanceType(name types.NodeName) (string, error) {
return "", nil return "", nil
} }
@ -435,10 +449,10 @@ func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
} }
// Implementation of Instances.CurrentNodeName // Implementation of Instances.CurrentNodeName
func (i *Instances) CurrentNodeName(hostname string) (string, error) { func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) {
// Beware when changing this, nodename == hostname assumption is crucial to // Beware when changing this, nodename == hostname assumption is crucial to
// apiserver => kubelet communication. // apiserver => kubelet communication.
return hostname, nil return types.NodeName(hostname), nil
} }
func (os *Rackspace) Clusters() (cloudprovider.Clusters, bool) { func (os *Rackspace) Clusters() (cloudprovider.Clusters, bool) {

View File

@ -40,6 +40,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/runtime"
) )
@ -128,16 +129,16 @@ type VSphereConfig struct {
type Volumes interface { type Volumes interface {
// AttachDisk attaches given disk to given node. Current node // AttachDisk attaches given disk to given node. Current node
// is used when nodeName is empty string. // is used when nodeName is empty string.
AttachDisk(vmDiskPath string, nodeName string) (diskID string, diskUUID string, err error) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error)
// DetachDisk detaches given disk to given node. Current node // DetachDisk detaches given disk to given node. Current node
// is used when nodeName is empty string. // is used when nodeName is empty string.
// Assumption: If node doesn't exist, disk is already detached from node. // Assumption: If node doesn't exist, disk is already detached from node.
DetachDisk(volPath string, nodeName string) error DetachDisk(volPath string, nodeName k8stypes.NodeName) error
// DiskIsAttached checks if a disk is attached to the given node. // DiskIsAttached checks if a disk is attached to the given node.
// Assumption: If node doesn't exist, disk is not attached to the node. // Assumption: If node doesn't exist, disk is not attached to the node.
DiskIsAttached(volPath, nodeName string) (bool, error) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error)
// CreateVolume creates a new vmdk with specified parameters. // CreateVolume creates a new vmdk with specified parameters.
CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error) CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error)
@ -319,7 +320,9 @@ func vsphereLogin(cfg *VSphereConfig, ctx context.Context) (*govmomi.Client, err
} }
// Returns vSphere object `virtual machine` by its name. // Returns vSphere object `virtual machine` by its name.
func getVirtualMachineByName(cfg *VSphereConfig, ctx context.Context, c *govmomi.Client, name string) (*object.VirtualMachine, error) { func getVirtualMachineByName(cfg *VSphereConfig, ctx context.Context, c *govmomi.Client, nodeName k8stypes.NodeName) (*object.VirtualMachine, error) {
name := nodeNameToVMName(nodeName)
// Create a new finder // Create a new finder
f := find.NewFinder(c.Client, true) f := find.NewFinder(c.Client, true)
@ -406,7 +409,7 @@ func (vs *VSphere) Instances() (cloudprovider.Instances, bool) {
} }
// List returns names of VMs (inside vm folder) by applying filter and which are currently running. // List returns names of VMs (inside vm folder) by applying filter and which are currently running.
func (i *Instances) List(filter string) ([]string, error) { func (i *Instances) List(filter string) ([]k8stypes.NodeName, error) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
c, err := vsphereLogin(i.cfg, ctx) c, err := vsphereLogin(i.cfg, ctx)
@ -423,11 +426,15 @@ func (i *Instances) List(filter string) ([]string, error) {
glog.V(3).Infof("Found %d instances matching %s: %s", glog.V(3).Infof("Found %d instances matching %s: %s",
len(vmList), filter, vmList) len(vmList), filter, vmList)
return vmList, nil var nodeNames []k8stypes.NodeName
for _, n := range vmList {
nodeNames = append(nodeNames, k8stypes.NodeName(n))
}
return nodeNames, nil
} }
// NodeAddresses is an implementation of Instances.NodeAddresses. // NodeAddresses is an implementation of Instances.NodeAddresses.
func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) { func (i *Instances) NodeAddresses(nodeName k8stypes.NodeName) ([]api.NodeAddress, error) {
addrs := []api.NodeAddress{} addrs := []api.NodeAddress{}
// Create context // Create context
@ -441,7 +448,7 @@ func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) {
} }
defer c.Logout(ctx) defer c.Logout(ctx)
vm, err := getVirtualMachineByName(i.cfg, ctx, c, name) vm, err := getVirtualMachineByName(i.cfg, ctx, c, nodeName)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -476,12 +483,22 @@ func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("unimplemented") return errors.New("unimplemented")
} }
func (i *Instances) CurrentNodeName(hostname string) (string, error) { func (i *Instances) CurrentNodeName(hostname string) (k8stypes.NodeName, error) {
return i.localInstanceID, nil return k8stypes.NodeName(i.localInstanceID), nil
} }
// ExternalID returns the cloud provider ID of the specified instance (deprecated). // nodeNameToVMName maps a NodeName to the vmware infrastructure name
func (i *Instances) ExternalID(name string) (string, error) { func nodeNameToVMName(nodeName k8stypes.NodeName) string {
return string(nodeName)
}
// vmNameToNodeName maps a vmware infrastructure name to a NodeName
func vmNameToNodeName(vmName string) k8stypes.NodeName {
return k8stypes.NodeName(vmName)
}
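
AttachDisk below treats an empty NodeName as "the VM the kubelet runs on": it substitutes the locally discovered instance ID and normalises nodeName through vmNameToNodeName so that later calls, such as the DetachDisk cleanup, see a concrete node name. A standalone sketch of that fallback (local NodeName alias, hypothetical local instance ID):

package main

import "fmt"

// NodeName stands in for k8s.io/kubernetes/pkg/types.NodeName.
type NodeName string

func nodeNameToVMName(nodeName NodeName) string { return string(nodeName) }
func vmNameToNodeName(vmName string) NodeName   { return NodeName(vmName) }

// resolveTargetVM mirrors the fallback at the top of AttachDisk: an empty
// NodeName means "use the VM this process runs on".
func resolveTargetVM(nodeName NodeName, localInstanceID string) (vmName string, resolved NodeName) {
	if nodeName == "" {
		vmName = localInstanceID
		resolved = vmNameToNodeName(vmName)
	} else {
		vmName = nodeNameToVMName(nodeName)
		resolved = nodeName
	}
	return vmName, resolved
}

func main() {
	vm, node := resolveTargetVM("", "vm-kubelet-42") // hypothetical local instance ID
	fmt.Println(vm, node)                            // vm-kubelet-42 vm-kubelet-42
}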
// ExternalID returns the cloud provider ID of the node with the specified Name (deprecated).
func (i *Instances) ExternalID(nodeName k8stypes.NodeName) (string, error) {
// Create context // Create context
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -493,7 +510,7 @@ func (i *Instances) ExternalID(name string) (string, error) {
} }
defer c.Logout(ctx) defer c.Logout(ctx)
vm, err := getVirtualMachineByName(i.cfg, ctx, c, name) vm, err := getVirtualMachineByName(i.cfg, ctx, c, nodeName)
if err != nil { if err != nil {
if _, ok := err.(*find.NotFoundError); ok { if _, ok := err.(*find.NotFoundError); ok {
return "", cloudprovider.InstanceNotFound return "", cloudprovider.InstanceNotFound
@ -512,16 +529,16 @@ func (i *Instances) ExternalID(name string) (string, error) {
} }
if mvm.Summary.Config.Template == false { if mvm.Summary.Config.Template == false {
glog.Warningf("VM %s, is not in %s state", name, ActivePowerState) glog.Warningf("VM %s, is not in %s state", nodeName, ActivePowerState)
} else { } else {
glog.Warningf("VM %s, is a template", name) glog.Warningf("VM %s, is a template", nodeName)
} }
return "", cloudprovider.InstanceNotFound return "", cloudprovider.InstanceNotFound
} }
// InstanceID returns the cloud provider ID of the specified instance. // InstanceID returns the cloud provider ID of the node with the specified Name.
func (i *Instances) InstanceID(name string) (string, error) { func (i *Instances) InstanceID(nodeName k8stypes.NodeName) (string, error) {
// Create context // Create context
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -533,7 +550,7 @@ func (i *Instances) InstanceID(name string) (string, error) {
} }
defer c.Logout(ctx) defer c.Logout(ctx)
vm, err := getVirtualMachineByName(i.cfg, ctx, c, name) vm, err := getVirtualMachineByName(i.cfg, ctx, c, nodeName)
if err != nil { if err != nil {
if _, ok := err.(*find.NotFoundError); ok { if _, ok := err.(*find.NotFoundError); ok {
return "", cloudprovider.InstanceNotFound return "", cloudprovider.InstanceNotFound
@ -552,15 +569,15 @@ func (i *Instances) InstanceID(name string) (string, error) {
} }
if mvm.Summary.Config.Template == false { if mvm.Summary.Config.Template == false {
glog.Warningf("VM %s, is not in %s state", name, ActivePowerState) glog.Warningf("VM %s, is not in %s state", nodeName, ActivePowerState)
} else { } else {
glog.Warningf("VM %s, is a template", name) glog.Warningf("VM %s, is a template", nodeName)
} }
return "", cloudprovider.InstanceNotFound return "", cloudprovider.InstanceNotFound
} }
func (i *Instances) InstanceType(name string) (string, error) { func (i *Instances) InstanceType(name k8stypes.NodeName) (string, error) {
return "", nil return "", nil
} }
@ -657,7 +674,7 @@ func cleanUpController(newSCSIController types.BaseVirtualDevice, vmDevices obje
} }
// Attaches given virtual disk volume to the compute running kubelet. // Attaches given virtual disk volume to the compute running kubelet.
func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string, diskUUID string, err error) { func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) {
// Create context // Create context
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -673,8 +690,9 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
var vSphereInstance string var vSphereInstance string
if nodeName == "" { if nodeName == "" {
vSphereInstance = vs.localInstanceID vSphereInstance = vs.localInstanceID
nodeName = vmNameToNodeName(vSphereInstance)
} else { } else {
vSphereInstance = nodeName vSphereInstance = nodeNameToVMName(nodeName)
} }
// Get VM device list // Get VM device list
@ -790,7 +808,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
if newSCSICreated { if newSCSICreated {
cleanUpController(newSCSIController, vmDevices, vm, ctx) cleanUpController(newSCSIController, vmDevices, vm, ctx)
} }
vs.DetachDisk(deviceName, vSphereInstance) vs.DetachDisk(deviceName, nodeName)
return "", "", err return "", "", err
} }
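A small sketch of the defaulting pattern AttachDisk uses above, under simplified assumptions (the resolveVM helper and the localInstanceID value are hypothetical, not part of the PR): when no node name is supplied, the code falls back to the VM running the kubelet, and nodeName is filled in so that later calls such as the DetachDisk cleanup receive the typed value rather than a raw VM name.

package main

import "fmt"

type NodeName string

// resolveVM condenses the branch above: return the vSphere VM name to operate
// on, defaulting to the local instance, and normalize nodeName so the caller
// always ends up with a non-empty typed value.
func resolveVM(nodeName NodeName, localInstanceID string) (string, NodeName) {
    if nodeName == "" {
        return localInstanceID, NodeName(localInstanceID)
    }
    return string(nodeName), nodeName
}

func main() {
    vm, n := resolveVM("", "kubelet-vm-42")
    fmt.Println(vm, n) // kubelet-vm-42 kubelet-vm-42
    vm, n = resolveVM("node-a", "kubelet-vm-42")
    fmt.Println(vm, n) // node-a node-a
}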
@ -872,7 +890,7 @@ func getAvailableSCSIController(scsiControllers []*types.VirtualController) *typ
} }
// DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin. // DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
func (vs *VSphere) DiskIsAttached(volPath string, nodeName string) (bool, error) { func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -884,15 +902,16 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName string) (bool, error)
} }
defer c.Logout(ctx) defer c.Logout(ctx)
// Find virtual machine to attach disk to // Find the VM to check the disk attachment on
var vSphereInstance string var vSphereInstance string
if nodeName == "" { if nodeName == "" {
vSphereInstance = vs.localInstanceID vSphereInstance = vs.localInstanceID
nodeName = vmNameToNodeName(vSphereInstance)
} else { } else {
vSphereInstance = nodeName vSphereInstance = nodeNameToVMName(nodeName)
} }
nodeExist, err := vs.NodeExists(c, vSphereInstance) nodeExist, err := vs.NodeExists(c, nodeName)
if err != nil { if err != nil {
glog.Errorf("Failed to check whether node exist. err: %s.", err) glog.Errorf("Failed to check whether node exist. err: %s.", err)
@ -1043,7 +1062,7 @@ func getVirtualDiskID(volPath string, vmDevices object.VirtualDeviceList, dc *ob
} }
// DetachDisk detaches given virtual disk volume from the compute running kubelet. // DetachDisk detaches given virtual disk volume from the compute running kubelet.
func (vs *VSphere) DetachDisk(volPath string, nodeName string) error { func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error {
// Create context // Create context
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -1055,15 +1074,16 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName string) error {
} }
defer c.Logout(ctx) defer c.Logout(ctx)
// Find VM to detach disk from // Find VM to detach disk from
var vSphereInstance string var vSphereInstance string
if nodeName == "" { if nodeName == "" {
vSphereInstance = vs.localInstanceID vSphereInstance = vs.localInstanceID
nodeName = vmNameToNodeName(vSphereInstance)
} else { } else {
vSphereInstance = nodeName vSphereInstance = nodeNameToVMName(nodeName)
} }
nodeExist, err := vs.NodeExists(c, vSphereInstance) nodeExist, err := vs.NodeExists(c, nodeName)
if err != nil { if err != nil {
glog.Errorf("Failed to check whether node exist. err: %s.", err) glog.Errorf("Failed to check whether node exist. err: %s.", err)
@ -1073,7 +1093,7 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName string) error {
if !nodeExist { if !nodeExist {
glog.Warningf( glog.Warningf(
"Node %q does not exist. DetachDisk will assume vmdk %q is not attached to it.", "Node %q does not exist. DetachDisk will assume vmdk %q is not attached to it.",
vSphereInstance, nodeName,
volPath) volPath)
return nil return nil
} }
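The branch above makes detach idempotent when the node has already disappeared. A standalone sketch of that decision, with the existence check reduced to a map lookup (a hypothetical stand-in for vs.NodeExists): the function logs a warning and reports success, so repeated detach attempts for a vanished node converge instead of erroring forever.

package main

import (
    "fmt"
    "log"
)

type NodeName string

// detachDisk shows only the "node is gone" branch of the flow above: treat
// the vmdk as already detached and return success. known stands in for the
// real NodeExists lookup against vSphere.
func detachDisk(known map[NodeName]bool, volPath string, nodeName NodeName) error {
    if !known[nodeName] {
        log.Printf("Node %q does not exist. DetachDisk will assume vmdk %q is not attached to it.", nodeName, volPath)
        return nil
    }
    // ... the real detach work against the VM would happen here ...
    return nil
}

func main() {
    known := map[NodeName]bool{"node-a": true}
    fmt.Println(detachDisk(known, "[datastore] kubevols/pv-1.vmdk", "node-gone")) // <nil>
}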
@ -1214,8 +1234,7 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
// NodeExists checks if the node with the given nodeName exists. // NodeExists checks if the node with the given nodeName exists.
// Returns false if VM doesn't exist or VM is in powerOff state. // Returns false if VM doesn't exist or VM is in powerOff state.
func (vs *VSphere) NodeExists(c *govmomi.Client, nodeName string) (bool, error) { func (vs *VSphere) NodeExists(c *govmomi.Client, nodeName k8stypes.NodeName) (bool, error) {
if nodeName == "" { if nodeName == "" {
return false, nil return false, nil
} }

View File

@ -25,6 +25,7 @@ import (
"golang.org/x/net/context" "golang.org/x/net/context"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/rand" "k8s.io/kubernetes/pkg/util/rand"
) )
@ -186,7 +187,7 @@ func TestInstances(t *testing.T) {
} }
t.Logf("Found ExternalID(%s) = %s\n", srvs[0], externalId) t.Logf("Found ExternalID(%s) = %s\n", srvs[0], externalId)
nonExistingVM := rand.String(15) nonExistingVM := types.NodeName(rand.String(15))
externalId, err = i.ExternalID(nonExistingVM) externalId, err = i.ExternalID(nonExistingVM)
if err == cloudprovider.InstanceNotFound { if err == cloudprovider.InstanceNotFound {
t.Logf("VM %s was not found as expected\n", nonExistingVM) t.Logf("VM %s was not found as expected\n", nonExistingVM)

View File

@ -244,7 +244,7 @@ func nodeRunningOutdatedKubelet(node *api.Node) bool {
return false return false
} }
func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName string) (bool, error) { func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.NodeName) (bool, error) {
instances, ok := cloud.Instances() instances, ok := cloud.Instances()
if !ok { if !ok {
return false, fmt.Errorf("%v", ErrCloudInstance) return false, fmt.Errorf("%v", ErrCloudInstance)

View File

@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
utilnode "k8s.io/kubernetes/pkg/util/node" utilnode "k8s.io/kubernetes/pkg/util/node"
@ -147,7 +148,7 @@ type NodeController struct {
cidrAllocator CIDRAllocator cidrAllocator CIDRAllocator
forcefullyDeletePod func(*api.Pod) error forcefullyDeletePod func(*api.Pod) error
nodeExistsInCloudProvider func(string) (bool, error) nodeExistsInCloudProvider func(types.NodeName) (bool, error)
computeZoneStateFunc func(nodeConditions []*api.NodeCondition) (int, zoneState) computeZoneStateFunc func(nodeConditions []*api.NodeCondition) (int, zoneState)
enterPartialDisruptionFunc func(nodeNum int) float32 enterPartialDisruptionFunc func(nodeNum int) float32
enterFullDisruptionFunc func(nodeNum int) float32 enterFullDisruptionFunc func(nodeNum int) float32
@ -229,7 +230,7 @@ func NewNodeController(
serviceCIDR: serviceCIDR, serviceCIDR: serviceCIDR,
allocateNodeCIDRs: allocateNodeCIDRs, allocateNodeCIDRs: allocateNodeCIDRs,
forcefullyDeletePod: func(p *api.Pod) error { return forcefullyDeletePod(kubeClient, p) }, forcefullyDeletePod: func(p *api.Pod) error { return forcefullyDeletePod(kubeClient, p) },
nodeExistsInCloudProvider: func(nodeName string) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) }, nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
evictionLimiterQPS: evictionLimiterQPS, evictionLimiterQPS: evictionLimiterQPS,
secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS, secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS,
largeClusterThreshold: largeClusterThreshold, largeClusterThreshold: largeClusterThreshold,
@ -576,7 +577,7 @@ func (nc *NodeController) monitorNodeStatus() error {
// Check with the cloud provider to see if the node still exists. If it // Check with the cloud provider to see if the node still exists. If it
// doesn't, delete the node immediately. // doesn't, delete the node immediately.
if currentReadyCondition.Status != api.ConditionTrue && nc.cloud != nil { if currentReadyCondition.Status != api.ConditionTrue && nc.cloud != nil {
exists, err := nc.nodeExistsInCloudProvider(node.Name) exists, err := nc.nodeExistsInCloudProvider(types.NodeName(node.Name))
if err != nil { if err != nil {
glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err) glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err)
continue continue
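A sketch of why the controller stores this check as func(types.NodeName) (bool, error): the closure captures the cloud provider once, and callers convert node.Name to the typed name exactly at that boundary. The Instances interface and the not-found sentinel below are simplified stand-ins for the cloudprovider package, not its real API.

package main

import (
    "errors"
    "fmt"
)

type NodeName string

var errInstanceNotFound = errors.New("instance not found")

// Instances is a stripped-down stand-in for the cloudprovider Instances interface.
type Instances interface {
    ExternalID(nodeName NodeName) (string, error)
}

// fakeInstances pretends exactly one node exists in the cloud.
type fakeInstances struct{ known NodeName }

func (f fakeInstances) ExternalID(n NodeName) (string, error) {
    if n == f.known {
        return "i-abc123", nil
    }
    return "", errInstanceNotFound
}

// newNodeExistsCheck returns the typed closure the controller keeps around.
func newNodeExistsCheck(i Instances) func(NodeName) (bool, error) {
    return func(nodeName NodeName) (bool, error) {
        _, err := i.ExternalID(nodeName)
        if err == errInstanceNotFound {
            return false, nil
        }
        if err != nil {
            return false, err
        }
        return true, nil
    }
}

func main() {
    exists := newNodeExistsCheck(fakeInstances{known: "node-a"})
    fmt.Println(exists(NodeName("node-a"))) // true <nil>
    fmt.Println(exists("node-b"))           // false <nil>
}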

View File

@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/diff" "k8s.io/kubernetes/pkg/util/diff"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
) )
@ -1078,7 +1079,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
testNodeMonitorPeriod, nil, nil, 0, false) testNodeMonitorPeriod, nil, nil, 0, false)
nodeController.cloud = &fakecloud.FakeCloud{} nodeController.cloud = &fakecloud.FakeCloud{}
nodeController.now = func() unversioned.Time { return unversioned.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) } nodeController.now = func() unversioned.Time { return unversioned.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
nodeController.nodeExistsInCloudProvider = func(nodeName string) (bool, error) { nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
return false, nil return false, nil
} }
// monitorNodeStatus should allow this node to be immediately deleted // monitorNodeStatus should allow this node to be immediately deleted

View File

@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
nodeutil "k8s.io/kubernetes/pkg/util/node" nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@ -117,11 +118,11 @@ func (rc *RouteController) reconcileNodeRoutes() error {
func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.Route) error { func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.Route) error {
// nodeCIDRs maps nodeName->nodeCIDR // nodeCIDRs maps nodeName->nodeCIDR
nodeCIDRs := make(map[string]string) nodeCIDRs := make(map[types.NodeName]string)
// routeMap maps routeTargetInstance->route // routeMap maps routeTargetNode->route
routeMap := make(map[string]*cloudprovider.Route) routeMap := make(map[types.NodeName]*cloudprovider.Route)
for _, route := range routes { for _, route := range routes {
routeMap[route.TargetInstance] = route routeMap[route.TargetNode] = route
} }
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
@ -132,17 +133,18 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R
if node.Spec.PodCIDR == "" { if node.Spec.PodCIDR == "" {
continue continue
} }
nodeName := types.NodeName(node.Name)
// Check if we have a route for this node w/ the correct CIDR. // Check if we have a route for this node w/ the correct CIDR.
r := routeMap[node.Name] r := routeMap[nodeName]
if r == nil || r.DestinationCIDR != node.Spec.PodCIDR { if r == nil || r.DestinationCIDR != node.Spec.PodCIDR {
// If not, create the route. // If not, create the route.
route := &cloudprovider.Route{ route := &cloudprovider.Route{
TargetInstance: node.Name, TargetNode: nodeName,
DestinationCIDR: node.Spec.PodCIDR, DestinationCIDR: node.Spec.PodCIDR,
} }
nameHint := string(node.UID) nameHint := string(node.UID)
wg.Add(1) wg.Add(1)
go func(nodeName string, nameHint string, route *cloudprovider.Route) { go func(nodeName types.NodeName, nameHint string, route *cloudprovider.Route) {
defer wg.Done() defer wg.Done()
for i := 0; i < maxRetries; i++ { for i := 0; i < maxRetries; i++ {
startTime := time.Now() startTime := time.Now()
@ -161,20 +163,20 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R
return return
} }
} }
}(node.Name, nameHint, route) }(nodeName, nameHint, route)
} else { } else {
// Update condition only if it doesn't reflect the current state. // Update condition only if it doesn't reflect the current state.
_, condition := api.GetNodeCondition(&node.Status, api.NodeNetworkUnavailable) _, condition := api.GetNodeCondition(&node.Status, api.NodeNetworkUnavailable)
if condition == nil || condition.Status != api.ConditionFalse { if condition == nil || condition.Status != api.ConditionFalse {
rc.updateNetworkingCondition(node.Name, true) rc.updateNetworkingCondition(types.NodeName(node.Name), true)
} }
} }
nodeCIDRs[node.Name] = node.Spec.PodCIDR nodeCIDRs[nodeName] = node.Spec.PodCIDR
} }
for _, route := range routes { for _, route := range routes {
if rc.isResponsibleForRoute(route) { if rc.isResponsibleForRoute(route) {
// Check if this route applies to a node we know about & has correct CIDR. // Check if this route applies to a node we know about & has correct CIDR.
if nodeCIDRs[route.TargetInstance] != route.DestinationCIDR { if nodeCIDRs[route.TargetNode] != route.DestinationCIDR {
wg.Add(1) wg.Add(1)
// Delete the route. // Delete the route.
go func(route *cloudprovider.Route, startTime time.Time) { go func(route *cloudprovider.Route, startTime time.Time) {
@ -194,7 +196,7 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R
return nil return nil
} }
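A compact sketch of the reconcile bookkeeping after this change, with Route and Node reduced to local stand-ins for cloudprovider.Route and api.Node: both lookup maps are keyed by the typed node name, and node.Name is converted exactly once, so a route's TargetNode and a node's name can no longer be compared as bare strings by accident.

package main

import "fmt"

type NodeName string

// Route and Node are simplified stand-ins for cloudprovider.Route and api.Node.
type Route struct {
    TargetNode      NodeName
    DestinationCIDR string
}
type Node struct {
    Name    string
    PodCIDR string
}

// plan returns which nodes need a route created and which routes are stale,
// mirroring the create/delete decisions in reconcile above.
func plan(nodes []Node, routes []*Route) (create []NodeName, remove []*Route) {
    routeMap := make(map[NodeName]*Route)
    for _, r := range routes {
        routeMap[r.TargetNode] = r
    }
    nodeCIDRs := make(map[NodeName]string)
    for _, n := range nodes {
        if n.PodCIDR == "" {
            continue
        }
        name := NodeName(n.Name) // convert once, at the API boundary
        if r := routeMap[name]; r == nil || r.DestinationCIDR != n.PodCIDR {
            create = append(create, name)
        }
        nodeCIDRs[name] = n.PodCIDR
    }
    for _, r := range routes {
        if nodeCIDRs[r.TargetNode] != r.DestinationCIDR {
            remove = append(remove, r)
        }
    }
    return create, remove
}

func main() {
    nodes := []Node{{Name: "node-a", PodCIDR: "10.0.0.0/24"}, {Name: "node-b", PodCIDR: "10.0.1.0/24"}}
    routes := []*Route{{TargetNode: "node-a", DestinationCIDR: "10.0.0.0/24"}, {TargetNode: "node-c", DestinationCIDR: "10.0.9.0/24"}}
    create, remove := plan(nodes, routes)
    fmt.Println(create, len(remove)) // [node-b] 1
}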
func (rc *RouteController) updateNetworkingCondition(nodeName string, routeCreated bool) error { func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, routeCreated bool) error {
var err error var err error
for i := 0; i < updateNodeStatusMaxRetries; i++ { for i := 0; i < updateNodeStatusMaxRetries; i++ {
// Patch could also fail, even though the chance is very slim. So we still do // Patch could also fail, even though the chance is very slim. So we still do

View File

@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/types"
) )
func TestIsResponsibleForRoute(t *testing.T) { func TestIsResponsibleForRoute(t *testing.T) {
@ -58,7 +59,7 @@ func TestIsResponsibleForRoute(t *testing.T) {
rc := New(nil, nil, myClusterName, cidr) rc := New(nil, nil, myClusterName, cidr)
route := &cloudprovider.Route{ route := &cloudprovider.Route{
Name: testCase.routeName, Name: testCase.routeName,
TargetInstance: "doesnt-matter-for-this-test", TargetNode: types.NodeName("doesnt-matter-for-this-test"),
DestinationCIDR: testCase.routeCIDR, DestinationCIDR: testCase.routeCIDR,
} }
if resp := rc.isResponsibleForRoute(route); resp != testCase.expectedResponsible { if resp := rc.isResponsibleForRoute(route); resp != testCase.expectedResponsible {

View File

@ -237,7 +237,7 @@ func (adc *attachDetachController) nodeAdd(obj interface{}) {
return return
} }
nodeName := node.Name nodeName := types.NodeName(node.Name)
if _, exists := node.Annotations[volumehelper.ControllerManagedAttachAnnotation]; exists { if _, exists := node.Annotations[volumehelper.ControllerManagedAttachAnnotation]; exists {
// Node specifies annotation indicating it should be managed by attach // Node specifies annotation indicating it should be managed by attach
// detach controller. Add it to desired state of world. // detach controller. Add it to desired state of world.
@ -258,7 +258,7 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) {
return return
} }
nodeName := node.Name nodeName := types.NodeName(node.Name)
if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil { if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil {
glog.V(10).Infof("%v", err) glog.V(10).Infof("%v", err)
} }
@ -278,7 +278,9 @@ func (adc *attachDetachController) processPodVolumes(
return return
} }
if !adc.desiredStateOfWorld.NodeExists(pod.Spec.NodeName) { nodeName := types.NodeName(pod.Spec.NodeName)
if !adc.desiredStateOfWorld.NodeExists(nodeName) {
// If the node the pod is scheduled to does not exist in the desired // If the node the pod is scheduled to does not exist in the desired
// state of the world data structure, that indicates the node is not // state of the world data structure, that indicates the node is not
// yet managed by the controller. Therefore, ignore the pod. // yet managed by the controller. Therefore, ignore the pod.
@ -288,7 +290,7 @@ func (adc *attachDetachController) processPodVolumes(
"Skipping processing of pod %q/%q: it is scheduled to node %q which is not managed by the controller.", "Skipping processing of pod %q/%q: it is scheduled to node %q which is not managed by the controller.",
pod.Namespace, pod.Namespace,
pod.Name, pod.Name,
pod.Spec.NodeName) nodeName)
return return
} }
@ -321,7 +323,7 @@ func (adc *attachDetachController) processPodVolumes(
if addVolumes { if addVolumes {
// Add volume to desired state of world // Add volume to desired state of world
_, err := adc.desiredStateOfWorld.AddPod( _, err := adc.desiredStateOfWorld.AddPod(
uniquePodName, pod, volumeSpec, pod.Spec.NodeName) uniquePodName, pod, volumeSpec, nodeName)
if err != nil { if err != nil {
glog.V(10).Infof( glog.V(10).Infof(
"Failed to add volume %q for pod %q/%q to desiredStateOfWorld. %v", "Failed to add volume %q for pod %q/%q to desiredStateOfWorld. %v",
@ -345,7 +347,7 @@ func (adc *attachDetachController) processPodVolumes(
continue continue
} }
adc.desiredStateOfWorld.DeletePod( adc.desiredStateOfWorld.DeletePod(
uniquePodName, uniqueVolumeName, pod.Spec.NodeName) uniquePodName, uniqueVolumeName, nodeName)
} }
} }
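A small sketch of the conversion performed above: pod.Spec.NodeName remains a plain string in the API object, so the controller converts it to the typed name once and uses that value for every desired-state call afterwards. The desiredStateOfWorld here is reduced to a set of managed nodes; the names and types are stand-ins, not the real controller code.

package main

import "fmt"

type NodeName string

// PodSpec mirrors the relevant field of api.PodSpec: NodeName stays a plain string in the API.
type PodSpec struct{ NodeName string }
type Pod struct {
    Name string
    Spec PodSpec
}

// desiredStateOfWorld is reduced to the one question asked above: is this node managed?
type desiredStateOfWorld struct{ nodes map[NodeName]bool }

func (d *desiredStateOfWorld) NodeExists(n NodeName) bool { return d.nodes[n] }

func processPod(d *desiredStateOfWorld, pod *Pod) {
    nodeName := NodeName(pod.Spec.NodeName) // convert once, at the API boundary
    if !d.NodeExists(nodeName) {
        fmt.Printf("skipping pod %q: node %q is not managed by the controller\n", pod.Name, nodeName)
        return
    }
    fmt.Printf("processing volumes of pod %q on node %q\n", pod.Name, nodeName)
}

func main() {
    d := &desiredStateOfWorld{nodes: map[NodeName]bool{"node-a": true}}
    processPod(d, &Pod{Name: "web-1", Spec: PodSpec{NodeName: "node-a"}})
    processPod(d, &Pod{Name: "web-2", Spec: PodSpec{NodeName: "node-b"}})
}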
@ -516,7 +518,7 @@ func (adc *attachDetachController) getPVSpecFromCache(
// corresponding volume in the actual state of the world to indicate that it is // corresponding volume in the actual state of the world to indicate that it is
// mounted. // mounted.
func (adc *attachDetachController) processVolumesInUse( func (adc *attachDetachController) processVolumesInUse(
nodeName string, volumesInUse []api.UniqueVolumeName) { nodeName types.NodeName, volumesInUse []api.UniqueVolumeName) {
glog.V(4).Infof("processVolumesInUse for node %q", nodeName) glog.V(4).Infof("processVolumesInUse for node %q", nodeName)
for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) { for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) {
mounted := false mounted := false

View File

@ -29,6 +29,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/volumehelper" "k8s.io/kubernetes/pkg/volume/util/volumehelper"
@ -55,7 +56,7 @@ type ActualStateOfWorld interface {
// added. // added.
// If no node with the name nodeName exists in list of attached nodes for // If no node with the name nodeName exists in list of attached nodes for
// the specified volume, the node is added. // the specified volume, the node is added.
AddVolumeNode(volumeSpec *volume.Spec, nodeName string, devicePath string) (api.UniqueVolumeName, error) AddVolumeNode(volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (api.UniqueVolumeName, error)
// SetVolumeMountedByNode sets the MountedByNode value for the given volume // SetVolumeMountedByNode sets the MountedByNode value for the given volume
// and node. When set to true this value indicates the volume is mounted by // and node. When set to true this value indicates the volume is mounted by
@ -64,23 +65,23 @@ type ActualStateOfWorld interface {
// returned. // returned.
// If no node with the name nodeName exists in list of attached nodes for // If no node with the name nodeName exists in list of attached nodes for
// the specified volume, an error is returned. // the specified volume, an error is returned.
SetVolumeMountedByNode(volumeName api.UniqueVolumeName, nodeName string, mounted bool) error SetVolumeMountedByNode(volumeName api.UniqueVolumeName, nodeName types.NodeName, mounted bool) error
// SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified // SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified
// node to true indicating the AttachedVolume field in the Node's Status // node to true indicating the AttachedVolume field in the Node's Status
// object needs to be updated by the node updater again. // object needs to be updated by the node updater again.
// If the specified node does not exist in the nodesToUpdateStatusFor list, // If the specified node does not exist in the nodesToUpdateStatusFor list,
// log the error and return // log the error and return
SetNodeStatusUpdateNeeded(nodeName string) SetNodeStatusUpdateNeeded(nodeName types.NodeName)
// ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach // ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach
// request any more for the volume // request any more for the volume
ResetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName string) ResetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName types.NodeName)
// SetDetachRequestTime sets the detachRequestedTime to the current time if there is no // SetDetachRequestTime sets the detachRequestedTime to the current time if there is no
// previous request (the previous detachRequestedTime is zero) and returns the time elapsed // previous request (the previous detachRequestedTime is zero) and returns the time elapsed
// since the last request // since the last request
SetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName string) (time.Duration, error) SetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error)
// DeleteVolumeNode removes the given volume and node from the underlying // DeleteVolumeNode removes the given volume and node from the underlying
// store indicating the specified volume is no longer attached to the // store indicating the specified volume is no longer attached to the
@ -88,12 +89,12 @@ type ActualStateOfWorld interface {
// If the volume/node combo does not exist, this is a no-op. // If the volume/node combo does not exist, this is a no-op.
// If after deleting the node, the specified volume contains no other child // If after deleting the node, the specified volume contains no other child
// nodes, the volume is also deleted. // nodes, the volume is also deleted.
DeleteVolumeNode(volumeName api.UniqueVolumeName, nodeName string) DeleteVolumeNode(volumeName api.UniqueVolumeName, nodeName types.NodeName)
// VolumeNodeExists returns true if the specified volume/node combo exists // VolumeNodeExists returns true if the specified volume/node combo exists
// in the underlying store indicating the specified volume is attached to // in the underlying store indicating the specified volume is attached to
// the specified node. // the specified node.
VolumeNodeExists(volumeName api.UniqueVolumeName, nodeName string) bool VolumeNodeExists(volumeName api.UniqueVolumeName, nodeName types.NodeName) bool
// GetAttachedVolumes generates and returns a list of volumes/node pairs // GetAttachedVolumes generates and returns a list of volumes/node pairs
// reflecting which volumes are attached to which nodes based on the // reflecting which volumes are attached to which nodes based on the
@ -103,7 +104,7 @@ type ActualStateOfWorld interface {
// GetAttachedVolumes generates and returns a list of volumes attached to // GetAttachedVolumes generates and returns a list of volumes attached to
// the specified node reflecting which volumes are attached to that node // the specified node reflecting which volumes are attached to that node
// based on the current actual state of the world. // based on the current actual state of the world.
GetAttachedVolumesForNode(nodeName string) []AttachedVolume GetAttachedVolumesForNode(nodeName types.NodeName) []AttachedVolume
// GetVolumesToReportAttached returns a map containing the set of nodes for // GetVolumesToReportAttached returns a map containing the set of nodes for
// which the VolumesAttached Status field in the Node API object should be // which the VolumesAttached Status field in the Node API object should be
@ -112,7 +113,7 @@ type ActualStateOfWorld interface {
// this may differ from the actual list of attached volumes for the node // this may differ from the actual list of attached volumes for the node
// since volumes should be removed from this list as soon a detach operation // since volumes should be removed from this list as soon a detach operation
// is considered, before the detach operation is triggered). // is considered, before the detach operation is triggered).
GetVolumesToReportAttached() map[string][]api.AttachedVolume GetVolumesToReportAttached() map[types.NodeName][]api.AttachedVolume
} }
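A toy sketch of the data shape behind the interface above (heavily reduced; not the real implementation): attachments are tracked per volume in maps keyed by the typed node name, which is what lets every method take a types.NodeName instead of a string.

package main

import "fmt"

type NodeName string
type UniqueVolumeName string

// actualState is a toy version of actualStateOfWorld: volume -> set of nodes it is attached to.
type actualState struct {
    attached map[UniqueVolumeName]map[NodeName]bool
}

func newActualState() *actualState {
    return &actualState{attached: make(map[UniqueVolumeName]map[NodeName]bool)}
}

func (s *actualState) AddVolumeNode(v UniqueVolumeName, n NodeName) {
    if s.attached[v] == nil {
        s.attached[v] = make(map[NodeName]bool)
    }
    s.attached[v][n] = true
}

func (s *actualState) DeleteVolumeNode(v UniqueVolumeName, n NodeName) {
    delete(s.attached[v], n)
    if len(s.attached[v]) == 0 {
        delete(s.attached, v) // last node removed: drop the volume entry too
    }
}

func (s *actualState) VolumeNodeExists(v UniqueVolumeName, n NodeName) bool {
    return s.attached[v][n]
}

func main() {
    s := newActualState()
    s.AddVolumeNode("vol-1", "node-a")
    fmt.Println(s.VolumeNodeExists("vol-1", NodeName("node-a"))) // true
    s.DeleteVolumeNode("vol-1", "node-a")
    fmt.Println(s.VolumeNodeExists("vol-1", "node-a")) // false
}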
// AttachedVolume represents a volume that is attached to a node. // AttachedVolume represents a volume that is attached to a node.
@ -136,7 +137,7 @@ type AttachedVolume struct {
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld { func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{ return &actualStateOfWorld{
attachedVolumes: make(map[api.UniqueVolumeName]attachedVolume), attachedVolumes: make(map[api.UniqueVolumeName]attachedVolume),
nodesToUpdateStatusFor: make(map[string]nodeToUpdateStatusFor), nodesToUpdateStatusFor: make(map[types.NodeName]nodeToUpdateStatusFor),
volumePluginMgr: volumePluginMgr, volumePluginMgr: volumePluginMgr,
} }
} }
@ -152,7 +153,7 @@ type actualStateOfWorld struct {
// update the VolumesAttached Status field. The key in this map is the name // update the VolumesAttached Status field. The key in this map is the name
// of the node and the value is an object containing more information about // of the node and the value is an object containing more information about
// the node (including the list of volumes to report attached). // the node (including the list of volumes to report attached).
nodesToUpdateStatusFor map[string]nodeToUpdateStatusFor nodesToUpdateStatusFor map[types.NodeName]nodeToUpdateStatusFor
// volumePluginMgr is the volume plugin manager used to create volume // volumePluginMgr is the volume plugin manager used to create volume
// plugin objects. // plugin objects.
@ -176,7 +177,7 @@ type attachedVolume struct {
// successfully been attached to. The key in this map is the name of the // successfully been attached to. The key in this map is the name of the
// node and the value is a node object containing more information about // node and the value is a node object containing more information about
// the node. // the node.
nodesAttachedTo map[string]nodeAttachedTo nodesAttachedTo map[types.NodeName]nodeAttachedTo
// devicePath contains the path on the node where the volume is attached // devicePath contains the path on the node where the volume is attached
devicePath string devicePath string
@ -185,7 +186,7 @@ type attachedVolume struct {
// The nodeAttachedTo object represents a node that has volumes attached to it. // The nodeAttachedTo object represents a node that has volumes attached to it.
type nodeAttachedTo struct { type nodeAttachedTo struct {
// nodeName contains the name of this node. // nodeName contains the name of this node.
nodeName string nodeName types.NodeName
// mountedByNode indicates that this node/volume combo is mounted by the // mountedByNode indicates that this node/volume combo is mounted by the
// node and is unsafe to detach // node and is unsafe to detach
@ -206,7 +207,7 @@ type nodeAttachedTo struct {
// attached in the Node's Status API object. // attached in the Node's Status API object.
type nodeToUpdateStatusFor struct { type nodeToUpdateStatusFor struct {
// nodeName contains the name of this node. // nodeName contains the name of this node.
nodeName string nodeName types.NodeName
// statusUpdateNeeded indicates that the value of the VolumesAttached field // statusUpdateNeeded indicates that the value of the VolumesAttached field
// in the Node's Status API object should be updated. This should be set to // in the Node's Status API object should be updated. This should be set to
@ -224,32 +225,32 @@ type nodeToUpdateStatusFor struct {
} }
func (asw *actualStateOfWorld) MarkVolumeAsAttached( func (asw *actualStateOfWorld) MarkVolumeAsAttached(
_ api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName string, devicePath string) error { _ api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error {
_, err := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) _, err := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
return err return err
} }
func (asw *actualStateOfWorld) MarkVolumeAsDetached( func (asw *actualStateOfWorld) MarkVolumeAsDetached(
volumeName api.UniqueVolumeName, nodeName string) { volumeName api.UniqueVolumeName, nodeName types.NodeName) {
asw.DeleteVolumeNode(volumeName, nodeName) asw.DeleteVolumeNode(volumeName, nodeName)
} }
func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached( func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(
volumeName api.UniqueVolumeName, nodeName string) error { volumeName api.UniqueVolumeName, nodeName types.NodeName) error {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
return asw.removeVolumeFromReportAsAttached(volumeName, nodeName) return asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
} }
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached( func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(
volumeName api.UniqueVolumeName, nodeName string) { volumeName api.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
asw.addVolumeToReportAsAttached(volumeName, nodeName) asw.addVolumeToReportAsAttached(volumeName, nodeName)
} }
func (asw *actualStateOfWorld) AddVolumeNode( func (asw *actualStateOfWorld) AddVolumeNode(
volumeSpec *volume.Spec, nodeName string, devicePath string) (api.UniqueVolumeName, error) { volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (api.UniqueVolumeName, error) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@ -275,7 +276,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
volumeObj = attachedVolume{ volumeObj = attachedVolume{
volumeName: volumeName, volumeName: volumeName,
spec: volumeSpec, spec: volumeSpec,
nodesAttachedTo: make(map[string]nodeAttachedTo), nodesAttachedTo: make(map[types.NodeName]nodeAttachedTo),
devicePath: devicePath, devicePath: devicePath,
} }
asw.attachedVolumes[volumeName] = volumeObj asw.attachedVolumes[volumeName] = volumeObj
@ -301,7 +302,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
} }
func (asw *actualStateOfWorld) SetVolumeMountedByNode( func (asw *actualStateOfWorld) SetVolumeMountedByNode(
volumeName api.UniqueVolumeName, nodeName string, mounted bool) error { volumeName api.UniqueVolumeName, nodeName types.NodeName, mounted bool) error {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@ -330,7 +331,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
} }
func (asw *actualStateOfWorld) ResetDetachRequestTime( func (asw *actualStateOfWorld) ResetDetachRequestTime(
volumeName api.UniqueVolumeName, nodeName string) { volumeName api.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@ -344,7 +345,7 @@ func (asw *actualStateOfWorld) ResetDetachRequestTime(
} }
func (asw *actualStateOfWorld) SetDetachRequestTime( func (asw *actualStateOfWorld) SetDetachRequestTime(
volumeName api.UniqueVolumeName, nodeName string) (time.Duration, error) { volumeName api.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@ -366,7 +367,7 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(
// Get the volume and node object from actual state of world // Get the volume and node object from actual state of world
// This is an internal function and caller should acquire and release the lock // This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) getNodeAndVolume( func (asw *actualStateOfWorld) getNodeAndVolume(
volumeName api.UniqueVolumeName, nodeName string) (attachedVolume, nodeAttachedTo, error) { volumeName api.UniqueVolumeName, nodeName types.NodeName) (attachedVolume, nodeAttachedTo, error) {
volumeObj, volumeExists := asw.attachedVolumes[volumeName] volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if volumeExists { if volumeExists {
@ -384,7 +385,7 @@ func (asw *actualStateOfWorld) getNodeAndVolume(
// Remove the volumeName from the node's volumesToReportAsAttached list // Remove the volumeName from the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock // This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached( func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
volumeName api.UniqueVolumeName, nodeName string) error { volumeName api.UniqueVolumeName, nodeName types.NodeName) error {
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName] nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if nodeToUpdateExists { if nodeToUpdateExists {
@ -406,7 +407,7 @@ func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
// Add the volumeName to the node's volumesToReportAsAttached list // Add the volumeName to the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock // This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) addVolumeToReportAsAttached( func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumeName api.UniqueVolumeName, nodeName string) { volumeName api.UniqueVolumeName, nodeName types.NodeName) {
// In case the volume/node entry is no longer in attachedVolume list, skip the rest // In case the volume/node entry is no longer in attachedVolume list, skip the rest
if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil { if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil {
glog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName) glog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
@ -437,7 +438,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
// needs to be updated again by the node status updater. // needs to be updated again by the node status updater.
// If the specified node does not exist in the nodesToUpdateStatusFor list, log the error and return // If the specified node does not exist in the nodesToUpdateStatusFor list, log the error and return
// This is an internal function and caller should acquire and release the lock // This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName string, needed bool) { func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeName, needed bool) {
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName] nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if !nodeToUpdateExists { if !nodeToUpdateExists {
// should not happen // should not happen
@ -451,14 +452,14 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName string, nee
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
} }
func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName string) { func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
asw.updateNodeStatusUpdateNeeded(nodeName, true) asw.updateNodeStatusUpdateNeeded(nodeName, true)
} }
func (asw *actualStateOfWorld) DeleteVolumeNode( func (asw *actualStateOfWorld) DeleteVolumeNode(
volumeName api.UniqueVolumeName, nodeName string) { volumeName api.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@ -481,7 +482,7 @@ func (asw *actualStateOfWorld) DeleteVolumeNode(
} }
func (asw *actualStateOfWorld) VolumeNodeExists( func (asw *actualStateOfWorld) VolumeNodeExists(
volumeName api.UniqueVolumeName, nodeName string) bool { volumeName api.UniqueVolumeName, nodeName types.NodeName) bool {
asw.RLock() asw.RLock()
defer asw.RUnlock() defer asw.RUnlock()
@ -512,7 +513,7 @@ func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
} }
func (asw *actualStateOfWorld) GetAttachedVolumesForNode( func (asw *actualStateOfWorld) GetAttachedVolumesForNode(
nodeName string) []AttachedVolume { nodeName types.NodeName) []AttachedVolume {
asw.RLock() asw.RLock()
defer asw.RUnlock() defer asw.RUnlock()
@ -531,11 +532,11 @@ func (asw *actualStateOfWorld) GetAttachedVolumesForNode(
return attachedVolumes return attachedVolumes
} }
func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[string][]api.AttachedVolume { func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]api.AttachedVolume {
asw.RLock() asw.RLock()
defer asw.RUnlock() defer asw.RUnlock()
volumesToReportAttached := make(map[string][]api.AttachedVolume) volumesToReportAttached := make(map[types.NodeName][]api.AttachedVolume)
for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor { for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
if nodeToUpdateObj.statusUpdateNeeded { if nodeToUpdateObj.statusUpdateNeeded {
attachedVolumes := make( attachedVolumes := make(

View File

@ -22,6 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
"k8s.io/kubernetes/pkg/types"
volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetesting "k8s.io/kubernetes/pkg/volume/testing"
) )
@ -34,7 +35,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) {
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
// Act // Act
@ -66,8 +67,8 @@ func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := "node1-name" node1Name := types.NodeName("node1-name")
node2Name := "node2-name" node2Name := types.NodeName("node2-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
// Act // Act
@ -116,7 +117,7 @@ func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
// Act // Act
@ -160,7 +161,7 @@ func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -189,7 +190,7 @@ func Test_DeleteVolumeNode_Positive_VolumeDoesntExistNodeDoesntExist(t *testing.
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
nodeName := "node-name" nodeName := types.NodeName("node-name")
// Act // Act
asw.DeleteVolumeNode(volumeName, nodeName) asw.DeleteVolumeNode(volumeName, nodeName)
@ -216,8 +217,8 @@ func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := "node1-name" node1Name := types.NodeName("node1-name")
node2Name := "node2-name" node2Name := types.NodeName("node2-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath) generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath)
if add1Err != nil { if add1Err != nil {
@ -265,7 +266,7 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -297,8 +298,8 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := "node1-name" node1Name := types.NodeName("node1-name")
node2Name := "node2-name" node2Name := types.NodeName("node2-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, node1Name, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, node1Name, devicePath)
if addErr != nil { if addErr != nil {
@ -328,7 +329,7 @@ func Test_VolumeNodeExists_Positive_VolumeAndNodeDontExist(t *testing.T) {
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
nodeName := "node-name" nodeName := types.NodeName("node-name")
// Act // Act
volumeNodeComboExists := asw.VolumeNodeExists(volumeName, nodeName) volumeNodeComboExists := asw.VolumeNodeExists(volumeName, nodeName)
@ -369,7 +370,7 @@ func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -396,7 +397,7 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := api.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
node1Name := "node1-name" node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName1, add1Err := asw.AddVolumeNode(volume1Spec, node1Name, devicePath) generatedVolumeName1, add1Err := asw.AddVolumeNode(volume1Spec, node1Name, devicePath)
if add1Err != nil { if add1Err != nil {
@ -404,7 +405,7 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
} }
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := api.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
node2Name := "node2-name" node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath) generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath)
if add2Err != nil { if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
@ -431,13 +432,13 @@ func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := "node1-name" node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath) generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath)
if add1Err != nil { if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
} }
node2Name := "node2-name" node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name, devicePath) generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name, devicePath)
if add2Err != nil { if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
@ -470,7 +471,7 @@ func Test_SetVolumeMountedByNode_Positive_Set(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -497,7 +498,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSet(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -533,7 +534,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -566,7 +567,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetAddVolumeNodeNotRes
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -607,7 +608,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -655,7 +656,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Set(t *testing.T) {
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
@ -681,7 +682,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Marked(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -717,7 +718,7 @@ func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -760,7 +761,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -803,7 +804,7 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -836,7 +837,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -879,7 +880,7 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -924,7 +925,7 @@ func Test_SetDetachRequestTime_Positive(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -955,7 +956,7 @@ func verifyAttachedVolume(
attachedVolumes []AttachedVolume, attachedVolumes []AttachedVolume,
expectedVolumeName api.UniqueVolumeName, expectedVolumeName api.UniqueVolumeName,
expectedVolumeSpecName string, expectedVolumeSpecName string,
expectedNodeName string, expectedNodeName types.NodeName,
expectedMountedByNode, expectedMountedByNode,
expectNonZeroDetachRequestedTime bool) { expectNonZeroDetachRequestedTime bool) {
for _, attachedVolume := range attachedVolumes { for _, attachedVolume := range attachedVolumes {
@ -981,7 +982,7 @@ func Test_GetAttachedVolumesForNode_Positive_NoVolumesOrNodes(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
node := "random" node := types.NodeName("random")
// Act // Act
attachedVolumes := asw.GetAttachedVolumesForNode(node) attachedVolumes := asw.GetAttachedVolumesForNode(node)
@ -998,7 +999,7 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeOneNode(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
if addErr != nil { if addErr != nil {
@ -1022,7 +1023,7 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := api.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
node1Name := "node1-name" node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
_, add1Err := asw.AddVolumeNode(volume1Spec, node1Name, devicePath) _, add1Err := asw.AddVolumeNode(volume1Spec, node1Name, devicePath)
if add1Err != nil { if add1Err != nil {
@ -1030,7 +1031,7 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
} }
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := api.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
node2Name := "node2-name" node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath) generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath)
if add2Err != nil { if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
@ -1053,13 +1054,13 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) {
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := "node1-name" node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath) generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath)
if add1Err != nil { if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
} }
node2Name := "node2-name" node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name, devicePath) generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name, devicePath)
if add2Err != nil { if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)

View File

@ -26,6 +26,7 @@ import (
"sync" "sync"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/types"
@ -45,7 +46,7 @@ type DesiredStateOfWorld interface {
// AddNode adds the given node to the list of nodes managed by the attach/ // AddNode adds the given node to the list of nodes managed by the attach/
// detach controller. // detach controller.
// If the node already exists this is a no-op. // If the node already exists this is a no-op.
AddNode(nodeName string) AddNode(nodeName k8stypes.NodeName)
// AddPod adds the given pod to the list of pods that reference the // AddPod adds the given pod to the list of pods that reference the
// specified volume and is scheduled to the specified node. // specified volume and is scheduled to the specified node.
@ -57,13 +58,13 @@ type DesiredStateOfWorld interface {
// should be attached to the specified node, the volume is implicitly added. // should be attached to the specified node, the volume is implicitly added.
// If no node with the name nodeName exists in the list of nodes managed by the // If no node with the name nodeName exists in the list of nodes managed by the
// attach/detach controller, an error is returned. // attach/detach controller, an error is returned.
AddPod(podName types.UniquePodName, pod *api.Pod, volumeSpec *volume.Spec, nodeName string) (api.UniqueVolumeName, error) AddPod(podName types.UniquePodName, pod *api.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (api.UniqueVolumeName, error)
// DeleteNode removes the given node from the list of nodes managed by the // DeleteNode removes the given node from the list of nodes managed by the
// attach/detach controller. // attach/detach controller.
// If the node does not exist this is a no-op. // If the node does not exist this is a no-op.
// If the node exists but has 1 or more child volumes, an error is returned. // If the node exists but has 1 or more child volumes, an error is returned.
DeleteNode(nodeName string) error DeleteNode(nodeName k8stypes.NodeName) error
// DeletePod removes the given pod from the list of pods that reference the // DeletePod removes the given pod from the list of pods that reference the
// specified volume and are scheduled to the specified node. // specified volume and are scheduled to the specified node.
@ -75,16 +76,16 @@ type DesiredStateOfWorld interface {
// volumes under the specified node, this is a no-op. // volumes under the specified node, this is a no-op.
// If after deleting the pod, the specified volume contains no other child // If after deleting the pod, the specified volume contains no other child
// pods, the volume is also deleted. // pods, the volume is also deleted.
DeletePod(podName types.UniquePodName, volumeName api.UniqueVolumeName, nodeName string) DeletePod(podName types.UniquePodName, volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName)
// NodeExists returns true if the node with the specified name exists in // NodeExists returns true if the node with the specified name exists in
// the list of nodes managed by the attach/detach controller. // the list of nodes managed by the attach/detach controller.
NodeExists(nodeName string) bool NodeExists(nodeName k8stypes.NodeName) bool
// VolumeExists returns true if the volume with the specified name exists // VolumeExists returns true if the volume with the specified name exists
// in the list of volumes that should be attached to the specified node by // in the list of volumes that should be attached to the specified node by
// the attach detach controller. // the attach detach controller.
VolumeExists(volumeName api.UniqueVolumeName, nodeName string) bool VolumeExists(volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName) bool
// GetVolumesToAttach generates and returns a list of volumes to attach // GetVolumesToAttach generates and returns a list of volumes to attach
// and the nodes they should be attached to based on the current desired // and the nodes they should be attached to based on the current desired
@ -111,13 +112,13 @@ type PodToAdd struct {
VolumeName api.UniqueVolumeName VolumeName api.UniqueVolumeName
// nodeName contains the name of this node. // nodeName contains the name of this node.
NodeName string NodeName k8stypes.NodeName
} }
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld. // NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld { func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
return &desiredStateOfWorld{ return &desiredStateOfWorld{
nodesManaged: make(map[string]nodeManaged), nodesManaged: make(map[k8stypes.NodeName]nodeManaged),
volumePluginMgr: volumePluginMgr, volumePluginMgr: volumePluginMgr,
} }
} }
@ -126,7 +127,7 @@ type desiredStateOfWorld struct {
// nodesManaged is a map containing the set of nodes managed by the attach/ // nodesManaged is a map containing the set of nodes managed by the attach/
// detach controller. The key in this map is the name of the node and the // detach controller. The key in this map is the name of the node and the
// value is a node object containing more information about the node. // value is a node object containing more information about the node.
nodesManaged map[string]nodeManaged nodesManaged map[k8stypes.NodeName]nodeManaged
// volumePluginMgr is the volume plugin manager used to create volume // volumePluginMgr is the volume plugin manager used to create volume
// plugin objects. // plugin objects.
volumePluginMgr *volume.VolumePluginMgr volumePluginMgr *volume.VolumePluginMgr
@ -137,7 +138,7 @@ type desiredStateOfWorld struct {
// controller. // controller.
type nodeManaged struct { type nodeManaged struct {
// nodeName contains the name of this node. // nodeName contains the name of this node.
nodeName string nodeName k8stypes.NodeName
// volumesToAttach is a map containing the set of volumes that should be // volumesToAttach is a map containing the set of volumes that should be
// attached to this node. The key in the map is the name of the volume and // attached to this node. The key in the map is the name of the volume and
@ -172,7 +173,7 @@ type pod struct {
podObj *api.Pod podObj *api.Pod
} }
func (dsw *desiredStateOfWorld) AddNode(nodeName string) { func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName) {
dsw.Lock() dsw.Lock()
defer dsw.Unlock() defer dsw.Unlock()
@ -188,7 +189,7 @@ func (dsw *desiredStateOfWorld) AddPod(
podName types.UniquePodName, podName types.UniquePodName,
podToAdd *api.Pod, podToAdd *api.Pod,
volumeSpec *volume.Spec, volumeSpec *volume.Spec,
nodeName string) (api.UniqueVolumeName, error) { nodeName k8stypes.NodeName) (api.UniqueVolumeName, error) {
dsw.Lock() dsw.Lock()
defer dsw.Unlock() defer dsw.Unlock()
@ -236,7 +237,7 @@ func (dsw *desiredStateOfWorld) AddPod(
return volumeName, nil return volumeName, nil
} }
func (dsw *desiredStateOfWorld) DeleteNode(nodeName string) error { func (dsw *desiredStateOfWorld) DeleteNode(nodeName k8stypes.NodeName) error {
dsw.Lock() dsw.Lock()
defer dsw.Unlock() defer dsw.Unlock()
@ -261,7 +262,7 @@ func (dsw *desiredStateOfWorld) DeleteNode(nodeName string) error {
func (dsw *desiredStateOfWorld) DeletePod( func (dsw *desiredStateOfWorld) DeletePod(
podName types.UniquePodName, podName types.UniquePodName,
volumeName api.UniqueVolumeName, volumeName api.UniqueVolumeName,
nodeName string) { nodeName k8stypes.NodeName) {
dsw.Lock() dsw.Lock()
defer dsw.Unlock() defer dsw.Unlock()
@ -289,7 +290,7 @@ func (dsw *desiredStateOfWorld) DeletePod(
} }
} }
func (dsw *desiredStateOfWorld) NodeExists(nodeName string) bool { func (dsw *desiredStateOfWorld) NodeExists(nodeName k8stypes.NodeName) bool {
dsw.RLock() dsw.RLock()
defer dsw.RUnlock() defer dsw.RUnlock()
@ -298,7 +299,7 @@ func (dsw *desiredStateOfWorld) NodeExists(nodeName string) bool {
} }
func (dsw *desiredStateOfWorld) VolumeExists( func (dsw *desiredStateOfWorld) VolumeExists(
volumeName api.UniqueVolumeName, nodeName string) bool { volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName) bool {
dsw.RLock() dsw.RLock()
defer dsw.RUnlock() defer dsw.RUnlock()
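The DesiredStateOfWorld interface above now identifies nodes with k8stypes.NodeName instead of a bare string. The point of a defined string type is that the compiler refuses to accept a hostname (or any other string) where a node name is expected; every crossing of that boundary becomes an explicit, reviewable conversion. A minimal standalone sketch of the effect, with NodeName redeclared locally so the snippet is self-contained (this is not the controller code itself):

package main

import "fmt"

// NodeName mirrors the defined string type used for node names
// (types.NodeName in the real tree).
type NodeName string

// nodesManaged is keyed by NodeName, like the desired state of world above.
var nodesManaged = map[NodeName]struct{}{}

// AddNode only accepts a NodeName, never a raw string.
func AddNode(nodeName NodeName) {
	nodesManaged[nodeName] = struct{}{}
}

func main() {
	hostname := "ip-10-0-0-1" // a plain string, e.g. from os.Hostname()

	// AddNode(hostname)        // does not compile: cannot use string as NodeName
	AddNode(NodeName(hostname)) // the conversion has to be explicit and visible
	fmt.Println(len(nodesManaged))
}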

View File

@ -21,6 +21,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
k8stypes "k8s.io/kubernetes/pkg/types"
volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/types"
) )
@ -31,7 +32,7 @@ func Test_AddNode_Positive_NewNode(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
// Act // Act
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@ -56,7 +57,7 @@ func Test_AddNode_Positive_ExistingNode(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
// Act // Act
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@ -92,7 +93,7 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeDoesntExist(t *testing.T) {
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName) volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists { if volumeExists {
@ -140,7 +141,7 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeExists(t *testing.T) {
pod2Name := "pod2-uid" pod2Name := "pod2-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName) volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists { if volumeExists {
@ -213,7 +214,7 @@ func Test_AddPod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName) volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists { if volumeExists {
@ -279,7 +280,7 @@ func Test_AddPod_Negative_NewPodNodeDoesntExistVolumeDoesntExist(t *testing.T) {
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
volumeExists := dsw.VolumeExists(volumeName, nodeName) volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists { if volumeExists {
t.Fatalf( t.Fatalf(
@ -317,7 +318,7 @@ func Test_DeleteNode_Positive_NodeExists(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
// Act // Act
@ -345,7 +346,7 @@ func Test_DeleteNode_Positive_NodeDoesntExist(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
notAddedNodeName := "node-not-added-name" notAddedNodeName := k8stypes.NodeName("node-not-added-name")
// Act // Act
err := dsw.DeleteNode(notAddedNodeName) err := dsw.DeleteNode(notAddedNodeName)
@ -373,7 +374,7 @@ func Test_DeleteNode_Negative_NodeExistsHasChildVolumes(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
@ -417,7 +418,7 @@ func Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName) generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
if podAddErr != nil { if podAddErr != nil {
@ -465,7 +466,7 @@ func Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist(t *testing.T) {
pod2Name := "pod2-uid" pod2Name := "pod2-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
generatedVolumeName1, pod1AddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volumeSpec, nodeName) generatedVolumeName1, pod1AddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volumeSpec, nodeName)
if pod1AddErr != nil { if pod1AddErr != nil {
@ -526,7 +527,7 @@ func Test_DeletePod_Positive_PodDoesNotExist(t *testing.T) {
pod2Name := "pod2-uid" pod2Name := "pod2-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
generatedVolumeName, pod1AddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volumeSpec, nodeName) generatedVolumeName, pod1AddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volumeSpec, nodeName)
if pod1AddErr != nil { if pod1AddErr != nil {
@ -574,7 +575,7 @@ func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := "node1-name" node1Name := k8stypes.NodeName("node1-name")
dsw.AddNode(node1Name) dsw.AddNode(node1Name)
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, node1Name) generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, node1Name)
if podAddErr != nil { if podAddErr != nil {
@ -591,7 +592,7 @@ func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
generatedVolumeName, generatedVolumeName,
node1Name) node1Name)
} }
node2Name := "node2-name" node2Name := k8stypes.NodeName("node2-name")
// Act // Act
dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, node2Name) dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, node2Name)
@ -629,7 +630,7 @@ func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) {
podName := "pod-uid" podName := "pod-uid"
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := api.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volume1Spec, nodeName) generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volume1Spec, nodeName)
if podAddErr != nil { if podAddErr != nil {
@ -680,7 +681,7 @@ func Test_NodeExists_Positive_NodeExists(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
notAddedNodeName := "node-not-added-name" notAddedNodeName := k8stypes.NodeName("node-not-added-name")
// Act // Act
notAddedNodeExists := dsw.NodeExists(notAddedNodeName) notAddedNodeExists := dsw.NodeExists(notAddedNodeName)
@ -703,7 +704,7 @@ func Test_NodeExists_Positive_NodeDoesntExist(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
// Act // Act
@ -727,7 +728,7 @@ func Test_VolumeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
@ -757,7 +758,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
podName := "pod-uid" podName := "pod-uid"
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := api.UniqueVolumeName("volume1-name")
@ -793,7 +794,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeDoesntExists(t *testing.T)
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
// Act // Act
@ -833,8 +834,8 @@ func Test_GetVolumesToAttach_Positive_TwoNodes(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
node1Name := "node1-name" node1Name := k8stypes.NodeName("node1-name")
node2Name := "node2-name" node2Name := k8stypes.NodeName("node2-name")
dsw.AddNode(node1Name) dsw.AddNode(node1Name)
dsw.AddNode(node2Name) dsw.AddNode(node2Name)
@ -854,7 +855,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
node1Name := "node1-name" node1Name := k8stypes.NodeName("node1-name")
pod1Name := "pod1-uid" pod1Name := "pod1-uid"
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := api.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
@ -866,7 +867,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
pod1Name, pod1Name,
podAddErr) podAddErr)
} }
node2Name := "node2-name" node2Name := k8stypes.NodeName("node2-name")
pod2Name := "pod2-uid" pod2Name := "pod2-uid"
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := api.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
@ -899,7 +900,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
node1Name := "node1-name" node1Name := k8stypes.NodeName("node1-name")
pod1Name := "pod1-uid" pod1Name := "pod1-uid"
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := api.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
@ -911,7 +912,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
pod1Name, pod1Name,
podAddErr) podAddErr)
} }
node2Name := "node2-name" node2Name := k8stypes.NodeName("node2-name")
pod2Name := "pod2-uid" pod2Name := "pod2-uid"
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := api.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
@ -953,7 +954,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
node1Name := "node1-name" node1Name := k8stypes.NodeName("node1-name")
pod1Name := "pod1-uid" pod1Name := "pod1-uid"
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := api.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
@ -965,7 +966,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
pod1Name, pod1Name,
podAddErr) podAddErr)
} }
node2Name := "node2-name" node2Name := k8stypes.NodeName("node2-name")
pod2aName := "pod2a-name" pod2aName := "pod2a-name"
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := api.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
@ -1018,7 +1019,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
func verifyVolumeToAttach( func verifyVolumeToAttach(
t *testing.T, t *testing.T,
volumesToAttach []VolumeToAttach, volumesToAttach []VolumeToAttach,
expectedNodeName string, expectedNodeName k8stypes.NodeName,
expectedVolumeName api.UniqueVolumeName, expectedVolumeName api.UniqueVolumeName,
expectedVolumeSpecName string) { expectedVolumeSpecName string) {
for _, volumeToAttach := range volumesToAttach { for _, volumeToAttach := range volumesToAttach {

View File

@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@ -86,7 +87,7 @@ func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName) volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists { if volumeExists {
@ -132,7 +133,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName) volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists { if volumeExists {
@ -199,7 +200,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName) volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists { if volumeExists {
@ -266,7 +267,7 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name" nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName) volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists { if volumeExists {

View File

@ -60,7 +60,7 @@ type nodeStatusUpdater struct {
func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error { func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached() nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached()
for nodeName, attachedVolumes := range nodesToUpdate { for nodeName, attachedVolumes := range nodesToUpdate {
nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(nodeName) nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(string(nodeName))
if nodeObj == nil || !exists || err != nil { if nodeObj == nil || !exists || err != nil {
// If node does not exist, its status cannot be updated, log error and move on. // If node does not exist, its status cannot be updated, log error and move on.
glog.V(5).Infof( glog.V(5).Infof(
@ -105,7 +105,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
err) err)
} }
_, err = nsu.kubeClient.Core().Nodes().PatchStatus(nodeName, patchBytes) _, err = nsu.kubeClient.Core().Nodes().PatchStatus(string(nodeName), patchBytes)
if err != nil { if err != nil {
// If update node status fails, reset flag statusUpdateNeeded back to true // If update node status fails, reset flag statusUpdateNeeded back to true
// to indicate this node status needs to be updated again // to indicate this node status needs to be updated again
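With the volumes-to-report map now keyed by NodeName, the two string-based APIs touched in this hunk (the informer store's GetByKey and Nodes().PatchStatus) each take an explicit string(nodeName). A hedged sketch of that loop with fake stand-ins for the store and the node client (their shapes are invented for illustration; only the conversion pattern mirrors the code above):

package main

import "fmt"

type NodeName string

// fakeStore stands in for the string-keyed informer store.
type fakeStore map[string]struct{}

func (s fakeStore) GetByKey(key string) (interface{}, bool, error) {
	_, exists := s[key]
	return nil, exists, nil
}

// fakeNodes stands in for the node client; PatchStatus takes a plain string name.
type fakeNodes struct{ patched []string }

func (n *fakeNodes) PatchStatus(name string, patch []byte) {
	n.patched = append(n.patched, name)
}

func main() {
	nodesToUpdate := map[NodeName][]string{"node-1": {"vol-a"}}
	store := fakeStore{"node-1": {}}
	nodes := &fakeNodes{}

	for nodeName := range nodesToUpdate {
		// Both external APIs are string-keyed, so convert exactly once, at the boundary.
		if _, exists, _ := store.GetByKey(string(nodeName)); !exists {
			continue
		}
		nodes.PatchStatus(string(nodeName), nil)
	}
	fmt.Println(nodes.patched)
}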

View File

@ -7735,7 +7735,7 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
}, },
"host": { "host": {
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Host name on which the event is generated.", Description: "Node name on which the event is generated.",
Type: []string{"string"}, Type: []string{"string"},
Format: "", Format: "",
}, },

View File

@ -424,11 +424,15 @@ func DefaultAndValidateRunOptions(options *options.ServerRunOptions) {
if !supported { if !supported {
glog.Fatalf("GCE cloud provider has no instances. this shouldn't happen. exiting.") glog.Fatalf("GCE cloud provider has no instances. this shouldn't happen. exiting.")
} }
name, err := os.Hostname() hostname, err := os.Hostname()
if err != nil { if err != nil {
glog.Fatalf("Failed to get hostname: %v", err) glog.Fatalf("Failed to get hostname: %v", err)
} }
addrs, err := instances.NodeAddresses(name) nodeName, err := instances.CurrentNodeName(hostname)
if err != nil {
glog.Fatalf("Failed to get NodeName: %v", err)
}
addrs, err := instances.NodeAddresses(nodeName)
if err != nil { if err != nil {
glog.Warningf("Unable to obtain external host address from cloud provider: %v", err) glog.Warningf("Unable to obtain external host address from cloud provider: %v", err)
} else { } else {
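The hunk above stops treating os.Hostname() as the node name and instead asks the cloud provider's Instances interface for it, because on some clouds (AWS in particular) the local hostname and the registered node name differ. A sketch of that flow with a stand-in interface; CurrentNodeName and NodeAddresses echo the cloudprovider.Instances methods called above, but the fake implementation and its return values are invented for illustration:

package main

import (
	"fmt"
	"os"
)

type NodeName string

// instances captures just the two lookups used above.
type instances interface {
	CurrentNodeName(hostname string) (NodeName, error)
	NodeAddresses(name NodeName) ([]string, error)
}

// fakeCloud pretends the node is registered under a name that is not the hostname.
type fakeCloud struct{}

func (fakeCloud) CurrentNodeName(hostname string) (NodeName, error) {
	return NodeName("ip-10-0-0-1.ec2.internal"), nil
}

func (fakeCloud) NodeAddresses(name NodeName) ([]string, error) {
	return []string{"10.0.0.1"}, nil
}

func main() {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}
	var inst instances = fakeCloud{}

	nodeName, err := inst.CurrentNodeName(hostname) // hostname != node name in general
	if err != nil {
		panic(err)
	}
	addrs, err := inst.NodeAddresses(nodeName)
	if err != nil {
		panic(err)
	}
	fmt.Println(nodeName, addrs)
}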

View File

@ -23,11 +23,12 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types" kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
) )
// NewSourceApiserver creates a config source that watches and pulls from the apiserver. // NewSourceApiserver creates a config source that watches and pulls from the apiserver.
func NewSourceApiserver(c *clientset.Clientset, nodeName string, updates chan<- interface{}) { func NewSourceApiserver(c *clientset.Clientset, nodeName types.NodeName, updates chan<- interface{}) {
lw := cache.NewListWatchFromClient(c.CoreClient, "pods", api.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, nodeName)) lw := cache.NewListWatchFromClient(c.CoreClient, "pods", api.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName)))
newSourceApiserverFromLW(lw, updates) newSourceApiserverFromLW(lw, updates)
} }

View File

@ -35,11 +35,11 @@ import (
) )
// Generate a pod name that is unique among nodes by appending the nodeName. // Generate a pod name that is unique among nodes by appending the nodeName.
func generatePodName(name, nodeName string) string { func generatePodName(name string, nodeName types.NodeName) string {
return fmt.Sprintf("%s-%s", name, nodeName) return fmt.Sprintf("%s-%s", name, nodeName)
} }
func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) error { func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName types.NodeName) error {
if len(pod.UID) == 0 { if len(pod.UID) == 0 {
hasher := md5.New() hasher := md5.New()
if isFile { if isFile {
@ -62,7 +62,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) er
glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source) glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source)
// Set the Host field to indicate this pod is scheduled on the current node. // Set the Host field to indicate this pod is scheduled on the current node.
pod.Spec.NodeName = nodeName pod.Spec.NodeName = string(nodeName)
pod.ObjectMeta.SelfLink = getSelfLink(pod.Name, pod.Namespace) pod.ObjectMeta.SelfLink = getSelfLink(pod.Name, pod.Namespace)
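generatePodName above keeps using fmt.Sprintf("%s-%s", ...) even though nodeName is now a defined type: a named string type formats exactly like its underlying string, so only assignments into plain-string fields such as pod.Spec.NodeName need the explicit string(...) conversion. A quick self-contained check (NodeName redeclared locally):

package main

import "fmt"

type NodeName string

// generatePodName mirrors the helper above: the static pod name is made
// unique across nodes by appending the node name.
func generatePodName(name string, nodeName NodeName) string {
	return fmt.Sprintf("%s-%s", name, nodeName)
}

func main() {
	nodeName := NodeName("node-1")
	fmt.Println(generatePodName("test", nodeName)) // test-node-1

	// Assigning into a plain-string field still needs an explicit conversion.
	var specNodeName string = string(nodeName)
	fmt.Println(specNodeName)
}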

View File

@ -30,15 +30,16 @@ import (
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
) )
type sourceFile struct { type sourceFile struct {
path string path string
nodeName string nodeName types.NodeName
updates chan<- interface{} updates chan<- interface{}
} }
func NewSourceFile(path string, nodeName string, period time.Duration, updates chan<- interface{}) { func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) {
config := &sourceFile{ config := &sourceFile{
path: path, path: path,
nodeName: nodeName, nodeName: nodeName,

View File

@ -29,6 +29,7 @@ import (
kubetypes "k8s.io/kubernetes/pkg/kubelet/types" kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/types"
utiltesting "k8s.io/kubernetes/pkg/util/testing" utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
) )
@ -71,7 +72,7 @@ func writeTestFile(t *testing.T, dir, name string, contents string) *os.File {
} }
func TestReadPodsFromFile(t *testing.T) { func TestReadPodsFromFile(t *testing.T) {
hostname := "random-test-hostname" nodeName := "random-test-hostname"
grace := int64(30) grace := int64(30)
var testCases = []struct { var testCases = []struct {
desc string desc string
@ -100,14 +101,14 @@ func TestReadPodsFromFile(t *testing.T) {
}, },
expected: CreatePodUpdate(kubetypes.SET, kubetypes.FileSource, &api.Pod{ expected: CreatePodUpdate(kubetypes.SET, kubetypes.FileSource, &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: "test-" + hostname, Name: "test-" + nodeName,
UID: "12345", UID: "12345",
Namespace: "mynamespace", Namespace: "mynamespace",
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "12345"}, Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "12345"},
SelfLink: getSelfLink("test-"+hostname, "mynamespace"), SelfLink: getSelfLink("test-"+nodeName, "mynamespace"),
}, },
Spec: api.PodSpec{ Spec: api.PodSpec{
NodeName: hostname, NodeName: nodeName,
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst, DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace, TerminationGracePeriodSeconds: &grace,
@ -142,7 +143,7 @@ func TestReadPodsFromFile(t *testing.T) {
defer os.Remove(file.Name()) defer os.Remove(file.Name())
ch := make(chan interface{}) ch := make(chan interface{})
NewSourceFile(file.Name(), hostname, time.Millisecond, ch) NewSourceFile(file.Name(), types.NodeName(nodeName), time.Millisecond, ch)
select { select {
case got := <-ch: case got := <-ch:
update := got.(kubetypes.PodUpdate) update := got.(kubetypes.PodUpdate)

View File

@ -29,19 +29,20 @@ import (
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
) )
type sourceURL struct { type sourceURL struct {
url string url string
header http.Header header http.Header
nodeName string nodeName types.NodeName
updates chan<- interface{} updates chan<- interface{}
data []byte data []byte
failureLogs int failureLogs int
client *http.Client client *http.Client
} }
func NewSourceURL(url string, header http.Header, nodeName string, period time.Duration, updates chan<- interface{}) { func NewSourceURL(url string, header http.Header, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) {
config := &sourceURL{ config := &sourceURL{
url: url, url: url,
header: header, header: header,

View File

@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/api/validation"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types" kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
utiltesting "k8s.io/kubernetes/pkg/util/testing" utiltesting "k8s.io/kubernetes/pkg/util/testing"
) )
@ -121,7 +122,7 @@ func TestExtractInvalidPods(t *testing.T) {
} }
func TestExtractPodsFromHTTP(t *testing.T) { func TestExtractPodsFromHTTP(t *testing.T) {
hostname := "different-value" nodeName := "different-value"
grace := int64(30) grace := int64(30)
var testCases = []struct { var testCases = []struct {
@ -142,7 +143,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
Namespace: "mynamespace", Namespace: "mynamespace",
}, },
Spec: api.PodSpec{ Spec: api.PodSpec{
NodeName: hostname, NodeName: string(nodeName),
Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}}, Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}},
SecurityContext: &api.PodSecurityContext{}, SecurityContext: &api.PodSecurityContext{},
}, },
@ -155,13 +156,13 @@ func TestExtractPodsFromHTTP(t *testing.T) {
&api.Pod{ &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
UID: "111", UID: "111",
Name: "foo" + "-" + hostname, Name: "foo" + "-" + nodeName,
Namespace: "mynamespace", Namespace: "mynamespace",
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"}, Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"},
SelfLink: getSelfLink("foo-"+hostname, "mynamespace"), SelfLink: getSelfLink("foo-"+nodeName, "mynamespace"),
}, },
Spec: api.PodSpec{ Spec: api.PodSpec{
NodeName: hostname, NodeName: nodeName,
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst, DNSPolicy: api.DNSClusterFirst,
SecurityContext: &api.PodSecurityContext{}, SecurityContext: &api.PodSecurityContext{},
@ -193,7 +194,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
UID: "111", UID: "111",
}, },
Spec: api.PodSpec{ Spec: api.PodSpec{
NodeName: hostname, NodeName: nodeName,
Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}}, Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}},
SecurityContext: &api.PodSecurityContext{}, SecurityContext: &api.PodSecurityContext{},
}, },
@ -207,7 +208,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
UID: "222", UID: "222",
}, },
Spec: api.PodSpec{ Spec: api.PodSpec{
NodeName: hostname, NodeName: nodeName,
Containers: []api.Container{{Name: "2", Image: "bar:bartag", ImagePullPolicy: ""}}, Containers: []api.Container{{Name: "2", Image: "bar:bartag", ImagePullPolicy: ""}},
SecurityContext: &api.PodSecurityContext{}, SecurityContext: &api.PodSecurityContext{},
}, },
@ -222,13 +223,13 @@ func TestExtractPodsFromHTTP(t *testing.T) {
&api.Pod{ &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
UID: "111", UID: "111",
Name: "foo" + "-" + hostname, Name: "foo" + "-" + nodeName,
Namespace: "default", Namespace: "default",
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"}, Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"},
SelfLink: getSelfLink("foo-"+hostname, kubetypes.NamespaceDefault), SelfLink: getSelfLink("foo-"+nodeName, kubetypes.NamespaceDefault),
}, },
Spec: api.PodSpec{ Spec: api.PodSpec{
NodeName: hostname, NodeName: nodeName,
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst, DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace, TerminationGracePeriodSeconds: &grace,
@ -248,13 +249,13 @@ func TestExtractPodsFromHTTP(t *testing.T) {
&api.Pod{ &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
UID: "222", UID: "222",
Name: "bar" + "-" + hostname, Name: "bar" + "-" + nodeName,
Namespace: "default", Namespace: "default",
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "222"}, Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "222"},
SelfLink: getSelfLink("bar-"+hostname, kubetypes.NamespaceDefault), SelfLink: getSelfLink("bar-"+nodeName, kubetypes.NamespaceDefault),
}, },
Spec: api.PodSpec{ Spec: api.PodSpec{
NodeName: hostname, NodeName: nodeName,
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst, DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace, TerminationGracePeriodSeconds: &grace,
@ -291,7 +292,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
ch := make(chan interface{}, 1) ch := make(chan interface{}, 1)
c := sourceURL{testServer.URL, http.Header{}, hostname, ch, nil, 0, http.DefaultClient} c := sourceURL{testServer.URL, http.Header{}, types.NodeName(nodeName), ch, nil, 0, http.DefaultClient}
if err := c.extractFromURL(); err != nil { if err := c.extractFromURL(); err != nil {
t.Errorf("%s: Unexpected error: %v", testCase.desc, err) t.Errorf("%s: Unexpected error: %v", testCase.desc, err)
continue continue

View File

@ -222,7 +222,7 @@ type KubeletDeps struct {
// makePodSourceConfig creates a config.PodConfig from the given // makePodSourceConfig creates a config.PodConfig from the given
// KubeletConfiguration or returns an error. // KubeletConfiguration or returns an error.
func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, nodeName string) (*config.PodConfig, error) { func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, nodeName types.NodeName) (*config.PodConfig, error) {
manifestURLHeader := make(http.Header) manifestURLHeader := make(http.Header)
if kubeCfg.ManifestURLHeader != "" { if kubeCfg.ManifestURLHeader != "" {
pieces := strings.Split(kubeCfg.ManifestURLHeader, ":") pieces := strings.Split(kubeCfg.ManifestURLHeader, ":")
@ -277,7 +277,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
hostname := nodeutil.GetHostname(kubeCfg.HostnameOverride) hostname := nodeutil.GetHostname(kubeCfg.HostnameOverride)
// Query the cloud provider for our node name, default to hostname // Query the cloud provider for our node name, default to hostname
nodeName := hostname nodeName := types.NodeName(hostname)
if kubeDeps.Cloud != nil { if kubeDeps.Cloud != nil {
var err error var err error
instances, ok := kubeDeps.Cloud.Instances() instances, ok := kubeDeps.Cloud.Instances()
@ -377,7 +377,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
if kubeClient != nil { if kubeClient != nil {
// TODO: cache.NewListWatchFromClient is limited as it takes a client implementation rather // TODO: cache.NewListWatchFromClient is limited as it takes a client implementation rather
// than an interface. There is no way to construct a list+watcher using resource name. // than an interface. There is no way to construct a list+watcher using resource name.
fieldSelector := fields.Set{api.ObjectNameField: nodeName}.AsSelector() fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector()
listWatch := &cache.ListWatch{ listWatch := &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector options.FieldSelector = fieldSelector
@ -398,7 +398,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
// TODO: what is namespace for node? // TODO: what is namespace for node?
nodeRef := &api.ObjectReference{ nodeRef := &api.ObjectReference{
Kind: "Node", Kind: "Node",
Name: nodeName, Name: string(nodeName),
UID: types.UID(nodeName), UID: types.UID(nodeName),
Namespace: "", Namespace: "",
} }
@ -783,7 +783,7 @@ type Kubelet struct {
kubeletConfiguration componentconfig.KubeletConfiguration kubeletConfiguration componentconfig.KubeletConfiguration
hostname string hostname string
nodeName string nodeName types.NodeName
dockerClient dockertools.DockerInterface dockerClient dockertools.DockerInterface
runtimeCache kubecontainer.RuntimeCache runtimeCache kubecontainer.RuntimeCache
kubeClient clientset.Interface kubeClient clientset.Interface

View File

@ -191,7 +191,7 @@ func (kl *Kubelet) GetNode() (*api.Node, error) {
if kl.standaloneMode { if kl.standaloneMode {
return kl.initialNode() return kl.initialNode()
} }
return kl.nodeInfo.GetNodeInfo(kl.nodeName) return kl.nodeInfo.GetNodeInfo(string(kl.nodeName))
} }
// getNodeAnyWay() must return a *api.Node which is required by RunGeneralPredicates(). // getNodeAnyWay() must return a *api.Node which is required by RunGeneralPredicates().
@ -201,7 +201,7 @@ func (kl *Kubelet) GetNode() (*api.Node, error) {
// zero capacity, and the default labels. // zero capacity, and the default labels.
func (kl *Kubelet) getNodeAnyWay() (*api.Node, error) { func (kl *Kubelet) getNodeAnyWay() (*api.Node, error) {
if !kl.standaloneMode { if !kl.standaloneMode {
if n, err := kl.nodeInfo.GetNodeInfo(kl.nodeName); err == nil { if n, err := kl.nodeInfo.GetNodeInfo(string(kl.nodeName)); err == nil {
return n, nil return n, nil
} }
} }

View File

@ -98,7 +98,7 @@ func (kl *Kubelet) tryRegisterWithApiServer(node *api.Node) bool {
return false return false
} }
existingNode, err := kl.kubeClient.Core().Nodes().Get(kl.nodeName) existingNode, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName))
if err != nil { if err != nil {
glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err) glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
return false return false
@ -173,7 +173,7 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *a
func (kl *Kubelet) initialNode() (*api.Node, error) { func (kl *Kubelet) initialNode() (*api.Node, error) {
node := &api.Node{ node := &api.Node{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: kl.nodeName, Name: string(kl.nodeName),
Labels: map[string]string{ Labels: map[string]string{
unversioned.LabelHostname: kl.hostname, unversioned.LabelHostname: kl.hostname,
unversioned.LabelOS: goRuntime.GOOS, unversioned.LabelOS: goRuntime.GOOS,
@ -309,7 +309,7 @@ func (kl *Kubelet) updateNodeStatus() error {
// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0 // tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
// is set, this function will also confirm that cbr0 is configured correctly. // is set, this function will also confirm that cbr0 is configured correctly.
func (kl *Kubelet) tryUpdateNodeStatus() error { func (kl *Kubelet) tryUpdateNodeStatus() error {
node, err := kl.kubeClient.Core().Nodes().Get(kl.nodeName) node, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName))
if err != nil { if err != nil {
return fmt.Errorf("error getting node %q: %v", kl.nodeName, err) return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
} }

View File

@ -128,7 +128,7 @@ func newTestKubeletWithImageList(
kubelet.os = &containertest.FakeOS{} kubelet.os = &containertest.FakeOS{}
kubelet.hostname = testKubeletHostname kubelet.hostname = testKubeletHostname
kubelet.nodeName = testKubeletHostname kubelet.nodeName = types.NodeName(testKubeletHostname)
kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime) kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
kubelet.runtimeState.setNetworkState(nil) kubelet.runtimeState.setNetworkState(nil)
kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kubelet.nonMasqueradeCIDR, 1440) kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kubelet.nonMasqueradeCIDR, 1440)
@ -211,7 +211,7 @@ func newTestKubeletWithImageList(
kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime) kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime)
nodeRef := &api.ObjectReference{ nodeRef := &api.ObjectReference{
Kind: "Node", Kind: "Node",
Name: kubelet.nodeName, Name: string(kubelet.nodeName),
UID: types.UID(kubelet.nodeName), UID: types.UID(kubelet.nodeName),
Namespace: "", Namespace: "",
} }
@ -232,7 +232,7 @@ func newTestKubeletWithImageList(
kubelet.mounter = &mount.FakeMounter{} kubelet.mounter = &mount.FakeMounter{}
kubelet.volumeManager, err = kubeletvolume.NewVolumeManager( kubelet.volumeManager, err = kubeletvolume.NewVolumeManager(
controllerAttachDetachEnabled, controllerAttachDetachEnabled,
kubelet.hostname, kubelet.nodeName,
kubelet.podManager, kubelet.podManager,
fakeKubeClient, fakeKubeClient,
kubelet.volumePluginMgr, kubelet.volumePluginMgr,
@ -402,7 +402,7 @@ func TestHandlePortConflicts(t *testing.T) {
kl.nodeLister = testNodeLister{nodes: []api.Node{ kl.nodeLister = testNodeLister{nodes: []api.Node{
{ {
ObjectMeta: api.ObjectMeta{Name: kl.nodeName}, ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)},
Status: api.NodeStatus{ Status: api.NodeStatus{
Allocatable: api.ResourceList{ Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
@ -412,7 +412,7 @@ func TestHandlePortConflicts(t *testing.T) {
}} }}
kl.nodeInfo = testNodeInfo{nodes: []api.Node{ kl.nodeInfo = testNodeInfo{nodes: []api.Node{
{ {
ObjectMeta: api.ObjectMeta{Name: kl.nodeName}, ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)},
Status: api.NodeStatus{ Status: api.NodeStatus{
Allocatable: api.ResourceList{ Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
@ -421,7 +421,7 @@ func TestHandlePortConflicts(t *testing.T) {
}, },
}} }}
spec := api.PodSpec{NodeName: kl.nodeName, Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}} spec := api.PodSpec{NodeName: string(kl.nodeName), Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
pods := []*api.Pod{ pods := []*api.Pod{
podWithUidNameNsSpec("123456789", "newpod", "foo", spec), podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
podWithUidNameNsSpec("987654321", "oldpod", "foo", spec), podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
@ -555,7 +555,7 @@ func TestHandleMemExceeded(t *testing.T) {
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
spec := api.PodSpec{NodeName: kl.nodeName, spec := api.PodSpec{NodeName: string(kl.nodeName),
Containers: []api.Container{{Resources: api.ResourceRequirements{ Containers: []api.Container{{Resources: api.ResourceRequirements{
Requests: api.ResourceList{ Requests: api.ResourceList{
"memory": resource.MustParse("90"), "memory": resource.MustParse("90"),
@ -1781,7 +1781,7 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
kl := testKubelet.kubelet kl := testKubelet.kubelet
kl.nodeLister = testNodeLister{nodes: []api.Node{ kl.nodeLister = testNodeLister{nodes: []api.Node{
{ {
ObjectMeta: api.ObjectMeta{Name: kl.nodeName}, ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)},
Status: api.NodeStatus{ Status: api.NodeStatus{
Allocatable: api.ResourceList{ Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
@ -1791,7 +1791,7 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
}} }}
kl.nodeInfo = testNodeInfo{nodes: []api.Node{ kl.nodeInfo = testNodeInfo{nodes: []api.Node{
{ {
ObjectMeta: api.ObjectMeta{Name: kl.nodeName}, ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)},
Status: api.NodeStatus{ Status: api.NodeStatus{
Allocatable: api.ResourceList{ Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),

View File

@ -94,7 +94,7 @@ func TestRunOnce(t *testing.T) {
} }
kb.volumeManager, err = volumemanager.NewVolumeManager( kb.volumeManager, err = volumemanager.NewVolumeManager(
true, true,
kb.hostname, kb.nodeName,
kb.podManager, kb.podManager,
kb.kubeClient, kb.kubeClient,
kb.volumePluginMgr, kb.volumePluginMgr,
@ -109,7 +109,7 @@ func TestRunOnce(t *testing.T) {
kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime) kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime)
nodeRef := &api.ObjectReference{ nodeRef := &api.ObjectReference{
Kind: "Node", Kind: "Node",
Name: kb.nodeName, Name: string(kb.nodeName),
UID: types.UID(kb.nodeName), UID: types.UID(kb.nodeName),
Namespace: "", Namespace: "",
} }

View File

@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/apis/certificates" "k8s.io/kubernetes/pkg/apis/certificates"
unversionedcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/unversioned" unversionedcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/unversioned"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/types"
certutil "k8s.io/kubernetes/pkg/util/cert" certutil "k8s.io/kubernetes/pkg/util/cert"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
) )
@ -33,7 +34,7 @@ import (
// then it will watch the object's status; once approved by the API server, it will return the API // then it will watch the object's status; once approved by the API server, it will return the API
// server's issued certificate (PEM-encoded). If there are any errors, or the watch times out, // server's issued certificate (PEM-encoded). If there are any errors, or the watch times out,
// it will return an error. This is intended for use on nodes (kubelet and kubeadm). // it will return an error. This is intended for use on nodes (kubelet and kubeadm).
func RequestNodeCertificate(client unversionedcertificates.CertificateSigningRequestInterface, privateKeyData []byte, nodeName string) (certData []byte, err error) { func RequestNodeCertificate(client unversionedcertificates.CertificateSigningRequestInterface, privateKeyData []byte, nodeName types.NodeName) (certData []byte, err error) {
subject := &pkix.Name{ subject := &pkix.Name{
Organization: []string{"system:nodes"}, Organization: []string{"system:nodes"},
CommonName: fmt.Sprintf("system:node:%s", nodeName), CommonName: fmt.Sprintf("system:node:%s", nodeName),
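For reference, a minimal caller sketch of the updated signature (not part of this diff; requestCert is a hypothetical wrapper, and the CSR client and key bytes are assumed to be wired up elsewhere):

package main

import (
	"fmt"

	unversionedcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/unversioned"
	"k8s.io/kubernetes/pkg/kubelet/util/csr"
	"k8s.io/kubernetes/pkg/types"
)

// requestCert shows the strongly-typed call: callers now hand over a
// types.NodeName rather than a bare hostname string.
func requestCert(client unversionedcertificates.CertificateSigningRequestInterface, keyPEM []byte, nodeName types.NodeName) ([]byte, error) {
	certPEM, err := csr.RequestNodeCertificate(client, keyPEM, nodeName)
	if err != nil {
		return nil, fmt.Errorf("failed to request certificate for node %q: %v", nodeName, err)
	}
	return certPEM, nil
}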

View File

@ -160,7 +160,7 @@ type AttachedVolume struct {
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld. // NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld( func NewActualStateOfWorld(
nodeName string, nodeName types.NodeName,
volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld { volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{ return &actualStateOfWorld{
nodeName: nodeName, nodeName: nodeName,
@ -185,7 +185,7 @@ func IsRemountRequiredError(err error) bool {
type actualStateOfWorld struct { type actualStateOfWorld struct {
// nodeName is the name of this node. This value is passed to Attach/Detach // nodeName is the name of this node. This value is passed to Attach/Detach
nodeName string nodeName types.NodeName
// attachedVolumes is a map containing the set of volumes the kubelet volume // attachedVolumes is a map containing the set of volumes the kubelet volume
// manager believes to be successfully attached to this node. Volume types // manager believes to be successfully attached to this node. Volume types
// that do not implement an attacher interface are assumed to be in this // that do not implement an attacher interface are assumed to be in this
@ -271,12 +271,12 @@ type mountedPod struct {
} }
func (asw *actualStateOfWorld) MarkVolumeAsAttached( func (asw *actualStateOfWorld) MarkVolumeAsAttached(
volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, _, devicePath string) error { volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error {
return asw.addVolume(volumeName, volumeSpec, devicePath) return asw.addVolume(volumeName, volumeSpec, devicePath)
} }
func (asw *actualStateOfWorld) MarkVolumeAsDetached( func (asw *actualStateOfWorld) MarkVolumeAsDetached(
volumeName api.UniqueVolumeName, nodeName string) { volumeName api.UniqueVolumeName, nodeName types.NodeName) {
asw.DeleteVolume(volumeName) asw.DeleteVolume(volumeName)
} }
@ -296,11 +296,11 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted(
volumeGidValue) volumeGidValue)
} }
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName api.UniqueVolumeName, nodeName string) { func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName) {
// no operation for kubelet side // no operation for kubelet side
} }
func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(volumeName api.UniqueVolumeName, nodeName string) error { func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName) error {
// no operation for kubelet side // no operation for kubelet side
return nil return nil
} }

View File

@ -71,7 +71,7 @@ type Reconciler interface {
// successive executions // successive executions
// waitForAttachTimeout - the amount of time the Mount function will wait for // waitForAttachTimeout - the amount of time the Mount function will wait for
// the volume to be attached // the volume to be attached
// hostName - the hostname for this node, used by Attach and Detach methods // nodeName - the Node.Name of this node, used by the Attach and Detach methods
// desiredStateOfWorld - cache containing the desired state of the world // desiredStateOfWorld - cache containing the desired state of the world
// actualStateOfWorld - cache containing the actual state of the world // actualStateOfWorld - cache containing the actual state of the world
// operationExecutor - used to trigger attach/detach/mount/unmount operations // operationExecutor - used to trigger attach/detach/mount/unmount operations
@ -85,7 +85,7 @@ func NewReconciler(
loopSleepDuration time.Duration, loopSleepDuration time.Duration,
reconstructDuration time.Duration, reconstructDuration time.Duration,
waitForAttachTimeout time.Duration, waitForAttachTimeout time.Duration,
hostName string, nodeName types.NodeName,
desiredStateOfWorld cache.DesiredStateOfWorld, desiredStateOfWorld cache.DesiredStateOfWorld,
actualStateOfWorld cache.ActualStateOfWorld, actualStateOfWorld cache.ActualStateOfWorld,
operationExecutor operationexecutor.OperationExecutor, operationExecutor operationexecutor.OperationExecutor,
@ -98,7 +98,7 @@ func NewReconciler(
loopSleepDuration: loopSleepDuration, loopSleepDuration: loopSleepDuration,
reconstructDuration: reconstructDuration, reconstructDuration: reconstructDuration,
waitForAttachTimeout: waitForAttachTimeout, waitForAttachTimeout: waitForAttachTimeout,
hostName: hostName, nodeName: nodeName,
desiredStateOfWorld: desiredStateOfWorld, desiredStateOfWorld: desiredStateOfWorld,
actualStateOfWorld: actualStateOfWorld, actualStateOfWorld: actualStateOfWorld,
operationExecutor: operationExecutor, operationExecutor: operationExecutor,
@ -115,7 +115,7 @@ type reconciler struct {
loopSleepDuration time.Duration loopSleepDuration time.Duration
reconstructDuration time.Duration reconstructDuration time.Duration
waitForAttachTimeout time.Duration waitForAttachTimeout time.Duration
hostName string nodeName types.NodeName
desiredStateOfWorld cache.DesiredStateOfWorld desiredStateOfWorld cache.DesiredStateOfWorld
actualStateOfWorld cache.ActualStateOfWorld actualStateOfWorld cache.ActualStateOfWorld
operationExecutor operationexecutor.OperationExecutor operationExecutor operationexecutor.OperationExecutor
@ -201,7 +201,7 @@ func (rc *reconciler) reconcile() {
volumeToMount.Pod.UID) volumeToMount.Pod.UID)
err := rc.operationExecutor.VerifyControllerAttachedVolume( err := rc.operationExecutor.VerifyControllerAttachedVolume(
volumeToMount.VolumeToMount, volumeToMount.VolumeToMount,
rc.hostName, rc.nodeName,
rc.actualStateOfWorld) rc.actualStateOfWorld)
if err != nil && if err != nil &&
!nestedpendingoperations.IsAlreadyExists(err) && !nestedpendingoperations.IsAlreadyExists(err) &&
@ -230,7 +230,7 @@ func (rc *reconciler) reconcile() {
volumeToAttach := operationexecutor.VolumeToAttach{ volumeToAttach := operationexecutor.VolumeToAttach{
VolumeName: volumeToMount.VolumeName, VolumeName: volumeToMount.VolumeName,
VolumeSpec: volumeToMount.VolumeSpec, VolumeSpec: volumeToMount.VolumeSpec,
NodeName: rc.hostName, NodeName: rc.nodeName,
} }
glog.V(12).Infof("Attempting to start AttachVolume for volume %q (spec.Name: %q) pod %q (UID: %q)", glog.V(12).Infof("Attempting to start AttachVolume for volume %q (spec.Name: %q) pod %q (UID: %q)",
volumeToMount.VolumeName, volumeToMount.VolumeName,
@ -334,7 +334,7 @@ func (rc *reconciler) reconcile() {
// Kubelet not responsible for detaching or this volume has a non-attachable volume plugin, // Kubelet not responsible for detaching or this volume has a non-attachable volume plugin,
// so just remove it from actualStateOfWorld without detaching. // so just remove it from actualStateOfWorld without detaching.
rc.actualStateOfWorld.MarkVolumeAsDetached( rc.actualStateOfWorld.MarkVolumeAsDetached(
attachedVolume.VolumeName, rc.hostName) attachedVolume.VolumeName, rc.nodeName)
} else { } else {
// Only detach if kubelet detach is enabled // Only detach if kubelet detach is enabled
glog.V(12).Infof("Attempting to start DetachVolume for volume %q (spec.Name: %q)", glog.V(12).Infof("Attempting to start DetachVolume for volume %q (spec.Name: %q)",

View File

@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@ -45,9 +46,9 @@ const (
reconcilerReconstructSleepPeriod time.Duration = 10 * time.Minute reconcilerReconstructSleepPeriod time.Duration = 10 * time.Minute
// waitForAttachTimeout is the maximum amount of time an // waitForAttachTimeout is the maximum amount of time an
// operationexecutor.Mount call will wait for a volume to be attached. // operationexecutor.Mount call will wait for a volume to be attached.
waitForAttachTimeout time.Duration = 1 * time.Second waitForAttachTimeout time.Duration = 1 * time.Second
nodeName string = "myhostname" nodeName k8stypes.NodeName = k8stypes.NodeName("mynodename")
kubeletPodsDir string = "fake-dir" kubeletPodsDir string = "fake-dir"
) )
// Calls Run() // Calls Run()
@ -452,7 +453,7 @@ func createTestClient() *fake.Clientset {
fakeClient.AddReactor("get", "nodes", fakeClient.AddReactor("get", "nodes",
func(action core.Action) (bool, runtime.Object, error) { func(action core.Action) (bool, runtime.Object, error) {
return true, &api.Node{ return true, &api.Node{
ObjectMeta: api.ObjectMeta{Name: nodeName}, ObjectMeta: api.ObjectMeta{Name: string(nodeName)},
Status: api.NodeStatus{ Status: api.NodeStatus{
VolumesAttached: []api.AttachedVolume{ VolumesAttached: []api.AttachedVolume{
{ {
@ -460,7 +461,7 @@ func createTestClient() *fake.Clientset {
DevicePath: "fake/path", DevicePath: "fake/path",
}, },
}}, }},
Spec: api.NodeSpec{ExternalID: nodeName}, Spec: api.NodeSpec{ExternalID: string(nodeName)},
}, nil }, nil
}) })
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {

View File

@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/populator" "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler" "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
@ -143,7 +144,7 @@ type VolumeManager interface {
// Must be pre-initialized. // Must be pre-initialized.
func NewVolumeManager( func NewVolumeManager(
controllerAttachDetachEnabled bool, controllerAttachDetachEnabled bool,
hostName string, nodeName k8stypes.NodeName,
podManager pod.Manager, podManager pod.Manager,
kubeClient internalclientset.Interface, kubeClient internalclientset.Interface,
volumePluginMgr *volume.VolumePluginMgr, volumePluginMgr *volume.VolumePluginMgr,
@ -156,7 +157,7 @@ func NewVolumeManager(
kubeClient: kubeClient, kubeClient: kubeClient,
volumePluginMgr: volumePluginMgr, volumePluginMgr: volumePluginMgr,
desiredStateOfWorld: cache.NewDesiredStateOfWorld(volumePluginMgr), desiredStateOfWorld: cache.NewDesiredStateOfWorld(volumePluginMgr),
actualStateOfWorld: cache.NewActualStateOfWorld(hostName, volumePluginMgr), actualStateOfWorld: cache.NewActualStateOfWorld(nodeName, volumePluginMgr),
operationExecutor: operationexecutor.NewOperationExecutor( operationExecutor: operationexecutor.NewOperationExecutor(
kubeClient, kubeClient,
volumePluginMgr, volumePluginMgr,
@ -169,7 +170,7 @@ func NewVolumeManager(
reconcilerLoopSleepPeriod, reconcilerLoopSleepPeriod,
reconcilerReconstructSleepPeriod, reconcilerReconstructSleepPeriod,
waitForAttachTimeout, waitForAttachTimeout,
hostName, nodeName,
vm.desiredStateOfWorld, vm.desiredStateOfWorld,
vm.actualStateOfWorld, vm.actualStateOfWorld,
vm.operationExecutor, vm.operationExecutor,

43
pkg/types/nodename.go Normal file
View File

@ -0,0 +1,43 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
// NodeName is a type that holds an api.Node's Name identifier.
// Being a distinct type captures intent and helps make sure that the node name
// is not confused with similar concepts (the hostname, the cloud provider ID,
// the cloud provider name, etc.).
//
// To clarify the various types:
//
// * Node.Name is the Name field of the Node in the API. This should be stored in a NodeName.
// Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level.
//
// * Hostname is the hostname of the local machine (from uname -n).
// However, some components allow the user to pass in a --hostname-override flag,
// which will override this in most places. In the absence of anything more meaningful,
// kubelet will use Hostname as the Node.Name when it creates the Node.
//
// * The cloudproviders have their own names: GCE has InstanceName, AWS has InstanceId.
//
// For GCE, InstanceName is the Name of an Instance object in the GCE API. On GCE, Instance.Name becomes the
// Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up
// to the cloudprovider how to do this mapping.
//
// For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the
// PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if
// we are using a custom DHCP domain it won't be.
type NodeName string
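A minimal sketch of how a component is expected to resolve its NodeName under these rules (not part of this PR; resolveNodeName is a hypothetical helper, and the cloudprovider/types imports follow the paths used elsewhere in this diff):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/types"
)

// resolveNodeName is a hypothetical helper: start from the hostname, but let
// the cloud provider supply the real Node.Name (e.g. the AWS PrivateDnsName)
// when one is configured.
func resolveNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName, error) {
	// Without a cloud provider there is nothing better than the hostname,
	// so convert it explicitly instead of passing strings around.
	nodeName := types.NodeName(hostname)
	if cloud != nil {
		instances, ok := cloud.Instances()
		if !ok {
			return "", fmt.Errorf("cloud provider has no instances interface")
		}
		cloudNodeName, err := instances.CurrentNodeName(hostname)
		if err != nil {
			return "", fmt.Errorf("error fetching node name from cloud provider: %v", err)
		}
		nodeName = cloudNodeName
	}
	return nodeName, nil
}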

View File

@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/types"
) )
func GetHostname(hostnameOverride string) string { func GetHostname(hostnameOverride string) string {
@ -86,7 +87,7 @@ func GetZoneKey(node *api.Node) string {
} }
// SetNodeCondition updates a specific node condition with a patch operation. // SetNodeCondition updates a specific node condition with a patch operation.
func SetNodeCondition(c clientset.Interface, node string, condition api.NodeCondition) error { func SetNodeCondition(c clientset.Interface, node types.NodeName, condition api.NodeCondition) error {
generatePatch := func(condition api.NodeCondition) ([]byte, error) { generatePatch := func(condition api.NodeCondition) ([]byte, error) {
raw, err := json.Marshal(&[]api.NodeCondition{condition}) raw, err := json.Marshal(&[]api.NodeCondition{condition})
if err != nil { if err != nil {
@ -99,6 +100,6 @@ func SetNodeCondition(c clientset.Interface, node string, condition api.NodeCond
if err != nil { if err != nil {
return nil return nil
} }
_, err = c.Core().Nodes().PatchStatus(node, patch) _, err = c.Core().Nodes().PatchStatus(string(node), patch)
return err return err
} }
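A hedged usage sketch of the new signature (not part of this diff; markNodeReady is a hypothetical caller, and the pkg/util/node import path is an assumption):

package main

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/types"
	nodeutil "k8s.io/kubernetes/pkg/util/node" // assumed import path for the package shown above
)

// markNodeReady patches the Ready condition for a node that is now identified
// by a types.NodeName rather than a raw string.
func markNodeReady(c clientset.Interface, nodeName types.NodeName) error {
	condition := api.NodeCondition{
		Type:              api.NodeReady,
		Status:            api.ConditionTrue,
		Reason:            "KubeletReady",
		Message:           "kubelet is posting ready status",
		LastHeartbeatTime: unversioned.Now(),
	}
	return nodeutil.SetNodeCondition(c, nodeName, condition)
}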

View File

@ -25,6 +25,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws" "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
@ -57,7 +58,7 @@ func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath str
return mount.GetMountRefs(mounter, deviceMountPath) return mount.GetMountRefs(mounter, deviceMountPath)
} }
func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, hostName string) (string, error) { func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, readOnly, err := getVolumeSource(spec) volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil { if err != nil {
return "", err return "", err
@ -67,7 +68,7 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, hostName
// awsCloud.AttachDisk checks if disk is already attached to node and // awsCloud.AttachDisk checks if disk is already attached to node and
// succeeds in that case, so no need to do that separately. // succeeds in that case, so no need to do that separately.
devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, hostName, readOnly) devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName, readOnly)
if err != nil { if err != nil {
glog.Errorf("Error attaching volume %q: %+v", volumeID, err) glog.Errorf("Error attaching volume %q: %+v", volumeID, err)
return "", err return "", err
@ -185,24 +186,24 @@ func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error)
}, nil }, nil
} }
func (detacher *awsElasticBlockStoreDetacher) Detach(deviceMountPath string, hostName string) error { func (detacher *awsElasticBlockStoreDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
volumeID := path.Base(deviceMountPath) volumeID := path.Base(deviceMountPath)
attached, err := detacher.awsVolumes.DiskIsAttached(volumeID, hostName) attached, err := detacher.awsVolumes.DiskIsAttached(volumeID, nodeName)
if err != nil { if err != nil {
// Log error and continue with detach // Log error and continue with detach
glog.Errorf( glog.Errorf(
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", "Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
volumeID, hostName, err) volumeID, nodeName, err)
} }
if err == nil && !attached { if err == nil && !attached {
// Volume is already detached from node. // Volume is already detached from node.
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, hostName) glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
return nil return nil
} }
if _, err = detacher.awsVolumes.DetachDisk(volumeID, hostName); err != nil { if _, err = detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil {
glog.Errorf("Error detaching volumeID %q: %v", volumeID, err) glog.Errorf("Error detaching volumeID %q: %v", volumeID, err)
return err return err
} }

View File

@ -26,6 +26,7 @@ import (
volumetest "k8s.io/kubernetes/pkg/volume/testing" volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
) )
func TestGetDeviceName_Volume(t *testing.T) { func TestGetDeviceName_Volume(t *testing.T) {
@ -74,7 +75,7 @@ type testcase struct {
func TestAttachDetach(t *testing.T) { func TestAttachDetach(t *testing.T) {
diskName := "disk" diskName := "disk"
instanceID := "instance" nodeName := types.NodeName("instance")
readOnly := false readOnly := false
spec := createVolSpec(diskName, readOnly) spec := createVolSpec(diskName, readOnly)
attachError := errors.New("Fake attach error") attachError := errors.New("Fake attach error")
@ -84,10 +85,10 @@ func TestAttachDetach(t *testing.T) {
// Successful Attach call // Successful Attach call
{ {
name: "Attach_Positive", name: "Attach_Positive",
attach: attachCall{diskName, instanceID, readOnly, "/dev/sda", nil}, attach: attachCall{diskName, nodeName, readOnly, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
return attacher.Attach(spec, instanceID) return attacher.Attach(spec, nodeName)
}, },
expectedDevice: "/dev/sda", expectedDevice: "/dev/sda",
}, },
@ -95,10 +96,10 @@ func TestAttachDetach(t *testing.T) {
// Attach call fails // Attach call fails
{ {
name: "Attach_Negative", name: "Attach_Negative",
attach: attachCall{diskName, instanceID, readOnly, "", attachError}, attach: attachCall{diskName, nodeName, readOnly, "", attachError},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
return attacher.Attach(spec, instanceID) return attacher.Attach(spec, nodeName)
}, },
expectedError: attachError, expectedError: attachError,
}, },
@ -106,43 +107,43 @@ func TestAttachDetach(t *testing.T) {
// Detach succeeds // Detach succeeds
{ {
name: "Detach_Positive", name: "Detach_Positive",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, true, nil}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
detach: detachCall{diskName, instanceID, "/dev/sda", nil}, detach: detachCall{diskName, nodeName, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, instanceID) return "", detacher.Detach(diskName, nodeName)
}, },
}, },
// Disk is already detached // Disk is already detached
{ {
name: "Detach_Positive_AlreadyDetached", name: "Detach_Positive_AlreadyDetached",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, instanceID) return "", detacher.Detach(diskName, nodeName)
}, },
}, },
// Detach succeeds when DiskIsAttached fails // Detach succeeds when DiskIsAttached fails
{ {
name: "Detach_Positive_CheckFails", name: "Detach_Positive_CheckFails",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, instanceID, "/dev/sda", nil}, detach: detachCall{diskName, nodeName, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, instanceID) return "", detacher.Detach(diskName, nodeName)
}, },
}, },
// Detach fails // Detach fails
{ {
name: "Detach_Negative", name: "Detach_Negative",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, instanceID, "", detachError}, detach: detachCall{diskName, nodeName, "", detachError},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, instanceID) return "", detacher.Detach(diskName, nodeName)
}, },
expectedError: detachError, expectedError: detachError,
}, },
@ -216,7 +217,7 @@ func createPVSpec(name string, readOnly bool) *volume.Spec {
type attachCall struct { type attachCall struct {
diskName string diskName string
instanceID string nodeName types.NodeName
readOnly bool readOnly bool
retDeviceName string retDeviceName string
ret error ret error
@ -224,21 +225,22 @@ type attachCall struct {
type detachCall struct { type detachCall struct {
diskName string diskName string
instanceID string nodeName types.NodeName
retDeviceName string retDeviceName string
ret error ret error
} }
type diskIsAttachedCall struct { type diskIsAttachedCall struct {
diskName, instanceID string diskName string
isAttached bool nodeName types.NodeName
ret error isAttached bool
ret error
} }
func (testcase *testcase) AttachDisk(diskName string, instanceID string, readOnly bool) (string, error) { func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) (string, error) {
expected := &testcase.attach expected := &testcase.attach
if expected.diskName == "" && expected.instanceID == "" { if expected.diskName == "" && expected.nodeName == "" {
// testcase.attach looks uninitialized, test did not expect to call // testcase.attach looks uninitialized, test did not expect to call
// AttachDisk // AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!") testcase.t.Errorf("Unexpected AttachDisk call!")
@ -250,9 +252,9 @@ func (testcase *testcase) AttachDisk(diskName string, instanceID string, readOnl
return "", errors.New("Unexpected AttachDisk call: wrong diskName") return "", errors.New("Unexpected AttachDisk call: wrong diskName")
} }
if expected.instanceID != instanceID { if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID) testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return "", errors.New("Unexpected AttachDisk call: wrong instanceID") return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
} }
if expected.readOnly != readOnly { if expected.readOnly != readOnly {
@ -260,15 +262,15 @@ func (testcase *testcase) AttachDisk(diskName string, instanceID string, readOnl
return "", errors.New("Unexpected AttachDisk call: wrong readOnly") return "", errors.New("Unexpected AttachDisk call: wrong readOnly")
} }
glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, instanceID, readOnly, expected.retDeviceName, expected.ret) glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, nodeName, readOnly, expected.retDeviceName, expected.ret)
return expected.retDeviceName, expected.ret return expected.retDeviceName, expected.ret
} }
func (testcase *testcase) DetachDisk(diskName string, instanceID string) (string, error) { func (testcase *testcase) DetachDisk(diskName string, nodeName types.NodeName) (string, error) {
expected := &testcase.detach expected := &testcase.detach
if expected.diskName == "" && expected.instanceID == "" { if expected.diskName == "" && expected.nodeName == "" {
// testcase.detach looks uninitialized, test did not expect to call // testcase.detach looks uninitialized, test did not expect to call
// DetachDisk // DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!") testcase.t.Errorf("Unexpected DetachDisk call!")
@ -280,20 +282,20 @@ func (testcase *testcase) DetachDisk(diskName string, instanceID string) (string
return "", errors.New("Unexpected DetachDisk call: wrong diskName") return "", errors.New("Unexpected DetachDisk call: wrong diskName")
} }
if expected.instanceID != instanceID { if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID) testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return "", errors.New("Unexpected DetachDisk call: wrong instanceID") return "", errors.New("Unexpected DetachDisk call: wrong nodeName")
} }
glog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, instanceID, expected.retDeviceName, expected.ret) glog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
return expected.retDeviceName, expected.ret return expected.retDeviceName, expected.ret
} }
func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, error) { func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
expected := &testcase.diskIsAttached expected := &testcase.diskIsAttached
if expected.diskName == "" && expected.instanceID == "" { if expected.diskName == "" && expected.nodeName == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to // testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached // call DiskIsAttached
testcase.t.Errorf("Unexpected DiskIsAttached call!") testcase.t.Errorf("Unexpected DiskIsAttached call!")
@ -305,12 +307,12 @@ func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, err
return false, errors.New("Unexpected DiskIsAttached call: wrong diskName") return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
} }
if expected.instanceID != instanceID { if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID) testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return false, errors.New("Unexpected DiskIsAttached call: wrong instanceID") return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
} }
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, instanceID, expected.isAttached, expected.ret) glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)
return expected.isAttached, expected.ret return expected.isAttached, expected.ret
} }

View File

@ -27,6 +27,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
@ -65,23 +66,23 @@ func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
}, nil }, nil
} }
// Attach attaches a volume.Spec to an Azure VM referenced by hostname, returning the disk's LUN // Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, hostName string) (string, error) { func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, err := getVolumeSource(spec) volumeSource, err := getVolumeSource(spec)
if err != nil { if err != nil {
glog.Warningf("failed to get azure disk spec") glog.Warningf("failed to get azure disk spec")
return "", err return "", err
} }
instanceid, err := attacher.azureProvider.InstanceID(hostName) instanceid, err := attacher.azureProvider.InstanceID(nodeName)
if err != nil { if err != nil {
glog.Warningf("failed to get azure instance id") glog.Warningf("failed to get azure instance id")
return "", fmt.Errorf("failed to get azure instance id for host %q", hostName) return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName)
} }
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 { if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):] instanceid = instanceid[(ind + 1):]
} }
lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, instanceid) lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
if err == cloudprovider.InstanceNotFound { if err == cloudprovider.InstanceNotFound {
// Log error and continue with attach // Log error and continue with attach
glog.Warningf( glog.Warningf(
@ -96,15 +97,15 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, hostName string) (s
getLunMutex.LockKey(instanceid) getLunMutex.LockKey(instanceid)
defer getLunMutex.UnlockKey(instanceid) defer getLunMutex.UnlockKey(instanceid)
lun, err = attacher.azureProvider.GetNextDiskLun(instanceid) lun, err = attacher.azureProvider.GetNextDiskLun(nodeName)
if err != nil { if err != nil {
glog.Warningf("no LUN available for instance %q", instanceid) glog.Warningf("no LUN available for instance %q", nodeName)
return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid) return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid)
} }
err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, instanceid, lun, compute.CachingTypes(*volumeSource.CachingMode)) err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
if err == nil { if err == nil {
glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, instanceid) glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
} else { } else {
glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err) glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err)
return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err) return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err)
@ -213,21 +214,21 @@ func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
} }
// Detach detaches disk from Azure VM. // Detach detaches disk from Azure VM.
func (detacher *azureDiskDetacher) Detach(diskName string, hostName string) error { func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeName) error {
if diskName == "" { if diskName == "" {
return fmt.Errorf("invalid disk to detach: %q", diskName) return fmt.Errorf("invalid disk to detach: %q", diskName)
} }
instanceid, err := detacher.azureProvider.InstanceID(hostName) instanceid, err := detacher.azureProvider.InstanceID(nodeName)
if err != nil { if err != nil {
glog.Warningf("no instance id for host %q, skip detaching", hostName) glog.Warningf("no instance id for node %q, skip detaching", nodeName)
return nil return nil
} }
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 { if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):] instanceid = instanceid[(ind + 1):]
} }
glog.V(4).Infof("detach %v from host %q", diskName, instanceid) glog.V(4).Infof("detach %v from node %q", diskName, nodeName)
err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, instanceid) err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, nodeName)
if err != nil { if err != nil {
glog.Errorf("failed to detach azure disk %q, err %v", diskName, err) glog.Errorf("failed to detach azure disk %q, err %v", diskName, err)
} }

View File

@ -50,15 +50,15 @@ type azureDataDiskPlugin struct {
// azure cloud provider should implement it // azure cloud provider should implement it
type azureCloudProvider interface { type azureCloudProvider interface {
// Attaches the disk to the host machine. // Attaches the disk to the host machine.
AttachDisk(diskName, diskUri, vmName string, lun int32, cachingMode compute.CachingTypes) error AttachDisk(diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
// Detaches the disk, identified by disk name or uri, from the host machine. // Detaches the disk, identified by disk name or uri, from the host machine.
DetachDiskByName(diskName, diskUri, vmName string) error DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error
// Get the LUN number of the disk that is attached to the host // Get the LUN number of the disk that is attached to the host
GetDiskLun(diskName, diskUri, vmName string) (int32, error) GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
// Get the next available LUN number to attach a new VHD // Get the next available LUN number to attach a new VHD
GetNextDiskLun(vmName string) (int32, error) GetNextDiskLun(nodeName types.NodeName) (int32, error)
// InstanceID returns the cloud provider ID of the specified instance. // InstanceID returns the cloud provider ID of the specified instance.
InstanceID(name string) (string, error) InstanceID(nodeName types.NodeName) (string, error)
} }
var _ volume.VolumePlugin = &azureDataDiskPlugin{} var _ volume.VolumePlugin = &azureDataDiskPlugin{}
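To make the intended call sequence concrete, here is a hedged sketch of driving this interface (not part of this diff; diskManager and attachWithLun are hypothetical names, and the real attacher additionally distinguishes cloudprovider.InstanceNotFound from other lookup errors):

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
	"k8s.io/kubernetes/pkg/types"
)

// diskManager mirrors the subset of the azureCloudProvider interface above
// that the attach path needs (signatures copied from this diff).
type diskManager interface {
	AttachDisk(diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
	GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
	GetNextDiskLun(nodeName types.NodeName) (int32, error)
}

// attachWithLun sketches the intended sequence: reuse the existing LUN if the
// disk is already attached, otherwise claim the next free LUN and attach.
// Error kinds are collapsed for brevity.
func attachWithLun(az diskManager, diskName, diskURI string, nodeName types.NodeName, caching compute.CachingTypes) (int32, error) {
	if lun, err := az.GetDiskLun(diskName, diskURI, nodeName); err == nil {
		return lun, nil // already attached at this LUN
	}
	lun, err := az.GetNextDiskLun(nodeName)
	if err != nil {
		return -1, fmt.Errorf("no free LUN on node %q: %v", nodeName, err)
	}
	if err := az.AttachDisk(diskName, diskURI, nodeName, lun, caching); err != nil {
		return -1, fmt.Errorf("attaching disk %q to node %q failed: %v", diskName, nodeName, err)
	}
	return lun, nil
}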

View File

@ -24,6 +24,7 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
@ -59,7 +60,7 @@ func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string
return mount.GetMountRefs(mounter, deviceMountPath) return mount.GetMountRefs(mounter, deviceMountPath)
} }
func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, hostName string) (string, error) { func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, _, err := getVolumeSource(spec) volumeSource, _, err := getVolumeSource(spec)
if err != nil { if err != nil {
return "", err return "", err
@ -71,7 +72,7 @@ func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, hostName string) (
if !res { if !res {
return "", fmt.Errorf("failed to list openstack instances") return "", fmt.Errorf("failed to list openstack instances")
} }
instanceid, err := instances.InstanceID(hostName) instanceid, err := instances.InstanceID(nodeName)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -208,13 +209,13 @@ func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
}, nil }, nil
} }
func (detacher *cinderDiskDetacher) Detach(deviceMountPath string, hostName string) error { func (detacher *cinderDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
volumeID := path.Base(deviceMountPath) volumeID := path.Base(deviceMountPath)
instances, res := detacher.cinderProvider.Instances() instances, res := detacher.cinderProvider.Instances()
if !res { if !res {
return fmt.Errorf("failed to list openstack instances") return fmt.Errorf("failed to list openstack instances")
} }
instanceid, err := instances.InstanceID(hostName) instanceid, err := instances.InstanceID(nodeName)
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 { if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):] instanceid = instanceid[(ind + 1):]
} }
@ -224,12 +225,12 @@ func (detacher *cinderDiskDetacher) Detach(deviceMountPath string, hostName stri
// Log error and continue with detach // Log error and continue with detach
glog.Errorf( glog.Errorf(
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", "Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
volumeID, hostName, err) volumeID, nodeName, err)
} }
if err == nil && !attached { if err == nil && !attached {
// Volume is already detached from node. // Volume is already detached from node.
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, hostName) glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
return nil return nil
} }

View File

@ -26,6 +26,7 @@ import (
volumetest "k8s.io/kubernetes/pkg/volume/testing" volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
) )
func TestGetDeviceName_Volume(t *testing.T) { func TestGetDeviceName_Volume(t *testing.T) {
@ -77,6 +78,7 @@ type testcase struct {
func TestAttachDetach(t *testing.T) { func TestAttachDetach(t *testing.T) {
diskName := "disk" diskName := "disk"
instanceID := "instance" instanceID := "instance"
nodeName := types.NodeName(instanceID)
readOnly := false readOnly := false
spec := createVolSpec(diskName, readOnly) spec := createVolSpec(diskName, readOnly)
attachError := errors.New("Fake attach error") attachError := errors.New("Fake attach error")
@ -93,7 +95,7 @@ func TestAttachDetach(t *testing.T) {
diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil}, diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
return attacher.Attach(spec, instanceID) return attacher.Attach(spec, nodeName)
}, },
expectedDevice: "/dev/sda", expectedDevice: "/dev/sda",
}, },
@ -106,7 +108,7 @@ func TestAttachDetach(t *testing.T) {
diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil}, diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
return attacher.Attach(spec, instanceID) return attacher.Attach(spec, nodeName)
}, },
expectedDevice: "/dev/sda", expectedDevice: "/dev/sda",
}, },
@ -120,7 +122,7 @@ func TestAttachDetach(t *testing.T) {
diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil}, diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
return attacher.Attach(spec, instanceID) return attacher.Attach(spec, nodeName)
}, },
expectedDevice: "/dev/sda", expectedDevice: "/dev/sda",
}, },
@ -133,7 +135,7 @@ func TestAttachDetach(t *testing.T) {
attach: attachCall{diskName, instanceID, "/dev/sda", attachError}, attach: attachCall{diskName, instanceID, "/dev/sda", attachError},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
return attacher.Attach(spec, instanceID) return attacher.Attach(spec, nodeName)
}, },
expectedError: attachError, expectedError: attachError,
}, },
@ -147,7 +149,7 @@ func TestAttachDetach(t *testing.T) {
diskPath: diskPathCall{diskName, instanceID, "", diskPathError}, diskPath: diskPathCall{diskName, instanceID, "", diskPathError},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
return attacher.Attach(spec, instanceID) return attacher.Attach(spec, nodeName)
}, },
expectedError: diskPathError, expectedError: diskPathError,
}, },
@ -160,7 +162,7 @@ func TestAttachDetach(t *testing.T) {
detach: detachCall{diskName, instanceID, nil}, detach: detachCall{diskName, instanceID, nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, instanceID) return "", detacher.Detach(diskName, nodeName)
}, },
}, },
@ -171,7 +173,7 @@ func TestAttachDetach(t *testing.T) {
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil}, diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, instanceID) return "", detacher.Detach(diskName, nodeName)
}, },
}, },
@ -183,7 +185,7 @@ func TestAttachDetach(t *testing.T) {
detach: detachCall{diskName, instanceID, nil}, detach: detachCall{diskName, instanceID, nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, instanceID) return "", detacher.Detach(diskName, nodeName)
}, },
}, },
@ -195,7 +197,7 @@ func TestAttachDetach(t *testing.T) {
detach: detachCall{diskName, instanceID, detachError}, detach: detachCall{diskName, instanceID, detachError},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, instanceID) return "", detacher.Detach(diskName, nodeName)
}, },
expectedError: detachError, expectedError: detachError,
}, },
@ -420,30 +422,30 @@ type instances struct {
instanceID string instanceID string
} }
func (instances *instances) NodeAddresses(name string) ([]api.NodeAddress, error) { func (instances *instances) NodeAddresses(name types.NodeName) ([]api.NodeAddress, error) {
return []api.NodeAddress{}, errors.New("Not implemented") return []api.NodeAddress{}, errors.New("Not implemented")
} }
func (instances *instances) ExternalID(name string) (string, error) { func (instances *instances) ExternalID(name types.NodeName) (string, error) {
return "", errors.New("Not implemented") return "", errors.New("Not implemented")
} }
func (instances *instances) InstanceID(name string) (string, error) { func (instances *instances) InstanceID(name types.NodeName) (string, error) {
return instances.instanceID, nil return instances.instanceID, nil
} }
func (instances *instances) InstanceType(name string) (string, error) { func (instances *instances) InstanceType(name types.NodeName) (string, error) {
return "", errors.New("Not implemented") return "", errors.New("Not implemented")
} }
func (instances *instances) List(filter string) ([]string, error) { func (instances *instances) List(filter string) ([]types.NodeName, error) {
return []string{}, errors.New("Not implemented") return []types.NodeName{}, errors.New("Not implemented")
} }
func (instances *instances) AddSSHKeyToAllInstances(user string, keyData []byte) error { func (instances *instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("Not implemented") return errors.New("Not implemented")
} }
func (instances *instances) CurrentNodeName(hostname string) (string, error) { func (instances *instances) CurrentNodeName(hostname string) (types.NodeName, error) {
return "", errors.New("Not implemented") return "", errors.New("Not implemented")
} }
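The fake cloudprovider above is one of many call sites that now take types.NodeName instead of a bare string. Below is a minimal sketch of why the defined string type helps; NodeName here is a local stand-in for the declaration in k8s.io/kubernetes/pkg/types, and the instanceID helper and hostname value are illustrative only. The point is that the compiler rejects a plain hostname unless the caller converts it explicitly, which is exactly the hostname/NodeName mix-up this PR guards against.

package main

import "fmt"

// NodeName mirrors the strongly-typed node name introduced in
// k8s.io/kubernetes/pkg/types; a local copy keeps this sketch self-contained.
type NodeName string

// instanceID looks up a cloud instance ID for a node. Taking a NodeName
// (not a plain string) means a bare hostname cannot be passed in by
// accident without an explicit conversion.
func instanceID(name NodeName) (string, error) {
	return "i-" + string(name), nil
}

func main() {
	hostname := "ip-10-0-0-1.ec2.internal" // a plain string

	// instanceID(hostname) would not compile; the conversion below is the
	// deliberate, visible step the strong type enforces.
	id, err := instanceID(NodeName(hostname))
	if err != nil {
		panic(err)
	}
	fmt.Println(id)
}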

View File

@ -26,6 +26,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
@ -60,13 +61,13 @@ func (plugin *gcePersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string
} }
// Attach checks with the GCE cloud provider if the specified volume is already // Attach checks with the GCE cloud provider if the specified volume is already
// attached to the specified node. If the volume is attached, it succeeds // attached to the node with the specified Name.
// (returns nil). If it is not, Attach issues a call to the GCE cloud provider // If the volume is attached, it succeeds (returns nil).
// to attach it. // If it is not, Attach issues a call to the GCE cloud provider to attach it.
// Callers are responsible for retryinging on failure. // Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and // Callers are responsible for thread safety between concurrent attach and
// detach operations. // detach operations.
func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, hostName string) (string, error) { func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, readOnly, err := getVolumeSource(spec) volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil { if err != nil {
return "", err return "", err
@ -74,20 +75,20 @@ func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, hostName st
pdName := volumeSource.PDName pdName := volumeSource.PDName
attached, err := attacher.gceDisks.DiskIsAttached(pdName, hostName) attached, err := attacher.gceDisks.DiskIsAttached(pdName, nodeName)
if err != nil { if err != nil {
// Log error and continue with attach // Log error and continue with attach
glog.Errorf( glog.Errorf(
"Error checking if PD (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v", "Error checking if PD (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v",
pdName, hostName, err) pdName, nodeName, err)
} }
if err == nil && attached { if err == nil && attached {
// Volume is already attached to node. // Volume is already attached to node.
glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, hostName) glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName)
} else { } else {
if err := attacher.gceDisks.AttachDisk(pdName, hostName, readOnly); err != nil { if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly); err != nil {
glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, hostName, err) glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err)
return "", err return "", err
} }
} }
@ -210,25 +211,25 @@ func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) {
// Callers are responsible for retrying on failure. // Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and detach // Callers are responsible for thread safety between concurrent attach and detach
// operations. // operations.
func (detacher *gcePersistentDiskDetacher) Detach(deviceMountPath string, hostName string) error { func (detacher *gcePersistentDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
pdName := path.Base(deviceMountPath) pdName := path.Base(deviceMountPath)
attached, err := detacher.gceDisks.DiskIsAttached(pdName, hostName) attached, err := detacher.gceDisks.DiskIsAttached(pdName, nodeName)
if err != nil { if err != nil {
// Log error and continue with detach // Log error and continue with detach
glog.Errorf( glog.Errorf(
"Error checking if PD (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", "Error checking if PD (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
pdName, hostName, err) pdName, nodeName, err)
} }
if err == nil && !attached { if err == nil && !attached {
// Volume is not attached to node. Success! // Volume is not attached to node. Success!
glog.Infof("Detach operation is successful. PD %q was not attached to node %q.", pdName, hostName) glog.Infof("Detach operation is successful. PD %q was not attached to node %q.", pdName, nodeName)
return nil return nil
} }
if err = detacher.gceDisks.DetachDisk(pdName, hostName); err != nil { if err = detacher.gceDisks.DetachDisk(pdName, nodeName); err != nil {
glog.Errorf("Error detaching PD %q from node %q: %v", pdName, hostName, err) glog.Errorf("Error detaching PD %q from node %q: %v", pdName, nodeName, err)
return err return err
} }
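The hunks above keep the attacher's best-effort pre-check: a failed DiskIsAttached only logs and Attach proceeds anyway, while an affirmative "already attached" result short-circuits to success. A compact sketch of that control flow follows, with a hypothetical disks interface and fakeDisks type standing in for the slice of the GCE cloud provider actually used here; NodeName is again a local stand-in.

package main

import (
	"errors"
	"fmt"
)

type NodeName string

// disks is a hypothetical stand-in for the GCE cloud methods the
// attacher calls; names follow the diff above, the implementation is fake.
type disks interface {
	DiskIsAttached(pdName string, nodeName NodeName) (bool, error)
	AttachDisk(pdName string, nodeName NodeName, readOnly bool) error
}

// attach mirrors the control flow of gcePersistentDiskAttacher.Attach:
// the pre-check is best effort, so a failed DiskIsAttached only logs and
// the code still attempts the attach.
func attach(d disks, pdName string, nodeName NodeName, readOnly bool) error {
	attached, err := d.DiskIsAttached(pdName, nodeName)
	if err != nil {
		// Non-fatal: fall through and try to attach anyway.
		fmt.Printf("check failed for %q on %q: %v\n", pdName, nodeName, err)
	}
	if err == nil && attached {
		// Already attached: Attach is idempotent and succeeds.
		return nil
	}
	return d.AttachDisk(pdName, nodeName, readOnly)
}

type fakeDisks struct{ attached bool }

func (f *fakeDisks) DiskIsAttached(string, NodeName) (bool, error) {
	return f.attached, errors.New("API unreachable")
}
func (f *fakeDisks) AttachDisk(string, NodeName, bool) error { return nil }

func main() {
	fmt.Println(attach(&fakeDisks{}, "my-pd", "node-1", false))
}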

View File

@ -26,6 +26,7 @@ import (
volumetest "k8s.io/kubernetes/pkg/volume/testing" volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
) )
func TestGetDeviceName_Volume(t *testing.T) { func TestGetDeviceName_Volume(t *testing.T) {
@ -73,7 +74,7 @@ type testcase struct {
func TestAttachDetach(t *testing.T) { func TestAttachDetach(t *testing.T) {
diskName := "disk" diskName := "disk"
instanceID := "instance" nodeName := types.NodeName("instance")
readOnly := false readOnly := false
spec := createVolSpec(diskName, readOnly) spec := createVolSpec(diskName, readOnly)
attachError := errors.New("Fake attach error") attachError := errors.New("Fake attach error")
@ -83,11 +84,11 @@ func TestAttachDetach(t *testing.T) {
// Successful Attach call // Successful Attach call
{ {
name: "Attach_Positive", name: "Attach_Positive",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
attach: attachCall{diskName, instanceID, readOnly, nil}, attach: attachCall{diskName, nodeName, readOnly, nil},
test: func(testcase *testcase) error { test: func(testcase *testcase) error {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
devicePath, err := attacher.Attach(spec, instanceID) devicePath, err := attacher.Attach(spec, nodeName)
if devicePath != "/dev/disk/by-id/google-disk" { if devicePath != "/dev/disk/by-id/google-disk" {
return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath) return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
} }
@ -98,10 +99,10 @@ func TestAttachDetach(t *testing.T) {
// Disk is already attached // Disk is already attached
{ {
name: "Attach_Positive_AlreadyAttached", name: "Attach_Positive_AlreadyAttached",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, true, nil}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
test: func(testcase *testcase) error { test: func(testcase *testcase) error {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
devicePath, err := attacher.Attach(spec, instanceID) devicePath, err := attacher.Attach(spec, nodeName)
if devicePath != "/dev/disk/by-id/google-disk" { if devicePath != "/dev/disk/by-id/google-disk" {
return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath) return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
} }
@ -112,11 +113,11 @@ func TestAttachDetach(t *testing.T) {
// DiskIsAttached fails and Attach succeeds // DiskIsAttached fails and Attach succeeds
{ {
name: "Attach_Positive_CheckFails", name: "Attach_Positive_CheckFails",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
attach: attachCall{diskName, instanceID, readOnly, nil}, attach: attachCall{diskName, nodeName, readOnly, nil},
test: func(testcase *testcase) error { test: func(testcase *testcase) error {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
devicePath, err := attacher.Attach(spec, instanceID) devicePath, err := attacher.Attach(spec, nodeName)
if devicePath != "/dev/disk/by-id/google-disk" { if devicePath != "/dev/disk/by-id/google-disk" {
return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath) return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
} }
@ -127,11 +128,11 @@ func TestAttachDetach(t *testing.T) {
// Attach call fails // Attach call fails
{ {
name: "Attach_Negative", name: "Attach_Negative",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
attach: attachCall{diskName, instanceID, readOnly, attachError}, attach: attachCall{diskName, nodeName, readOnly, attachError},
test: func(testcase *testcase) error { test: func(testcase *testcase) error {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
devicePath, err := attacher.Attach(spec, instanceID) devicePath, err := attacher.Attach(spec, nodeName)
if devicePath != "" { if devicePath != "" {
return fmt.Errorf("devicePath incorrect. Expected<\"\"> Actual: <%q>", devicePath) return fmt.Errorf("devicePath incorrect. Expected<\"\"> Actual: <%q>", devicePath)
} }
@ -143,43 +144,43 @@ func TestAttachDetach(t *testing.T) {
// Detach succeeds // Detach succeeds
{ {
name: "Detach_Positive", name: "Detach_Positive",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, true, nil}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
detach: detachCall{diskName, instanceID, nil}, detach: detachCall{diskName, nodeName, nil},
test: func(testcase *testcase) error { test: func(testcase *testcase) error {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return detacher.Detach(diskName, instanceID) return detacher.Detach(diskName, nodeName)
}, },
}, },
// Disk is already detached // Disk is already detached
{ {
name: "Detach_Positive_AlreadyDetached", name: "Detach_Positive_AlreadyDetached",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
test: func(testcase *testcase) error { test: func(testcase *testcase) error {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return detacher.Detach(diskName, instanceID) return detacher.Detach(diskName, nodeName)
}, },
}, },
// Detach succeeds when DiskIsAttached fails // Detach succeeds when DiskIsAttached fails
{ {
name: "Detach_Positive_CheckFails", name: "Detach_Positive_CheckFails",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, instanceID, nil}, detach: detachCall{diskName, nodeName, nil},
test: func(testcase *testcase) error { test: func(testcase *testcase) error {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return detacher.Detach(diskName, instanceID) return detacher.Detach(diskName, nodeName)
}, },
}, },
// Detach fails // Detach fails
{ {
name: "Detach_Negative", name: "Detach_Negative",
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, instanceID, detachError}, detach: detachCall{diskName, nodeName, detachError},
test: func(testcase *testcase) error { test: func(testcase *testcase) error {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return detacher.Detach(diskName, instanceID) return detacher.Detach(diskName, nodeName)
}, },
expectedReturn: detachError, expectedReturn: detachError,
}, },
@ -253,28 +254,29 @@ func createPVSpec(name string, readOnly bool) *volume.Spec {
// Fake GCE implementation // Fake GCE implementation
type attachCall struct { type attachCall struct {
diskName string diskName string
instanceID string nodeName types.NodeName
readOnly bool readOnly bool
ret error ret error
} }
type detachCall struct { type detachCall struct {
devicePath string devicePath string
instanceID string nodeName types.NodeName
ret error ret error
} }
type diskIsAttachedCall struct { type diskIsAttachedCall struct {
diskName, instanceID string diskName string
isAttached bool nodeName types.NodeName
ret error isAttached bool
ret error
} }
func (testcase *testcase) AttachDisk(diskName, instanceID string, readOnly bool) error { func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
expected := &testcase.attach expected := &testcase.attach
if expected.diskName == "" && expected.instanceID == "" { if expected.diskName == "" && expected.nodeName == "" {
// testcase.attach looks uninitialized, test did not expect to call // testcase.attach looks uninitialized, test did not expect to call
// AttachDisk // AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!") testcase.t.Errorf("Unexpected AttachDisk call!")
@ -286,9 +288,9 @@ func (testcase *testcase) AttachDisk(diskName, instanceID string, readOnly bool)
return errors.New("Unexpected AttachDisk call: wrong diskName") return errors.New("Unexpected AttachDisk call: wrong diskName")
} }
if expected.instanceID != instanceID { if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID) testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return errors.New("Unexpected AttachDisk call: wrong instanceID") return errors.New("Unexpected AttachDisk call: wrong nodeName")
} }
if expected.readOnly != readOnly { if expected.readOnly != readOnly {
@ -296,15 +298,15 @@ func (testcase *testcase) AttachDisk(diskName, instanceID string, readOnly bool)
return errors.New("Unexpected AttachDisk call: wrong readOnly") return errors.New("Unexpected AttachDisk call: wrong readOnly")
} }
glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, instanceID, readOnly, expected.ret) glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, nodeName, readOnly, expected.ret)
return expected.ret return expected.ret
} }
func (testcase *testcase) DetachDisk(devicePath, instanceID string) error { func (testcase *testcase) DetachDisk(devicePath string, nodeName types.NodeName) error {
expected := &testcase.detach expected := &testcase.detach
if expected.devicePath == "" && expected.instanceID == "" { if expected.devicePath == "" && expected.nodeName == "" {
// testcase.detach looks uninitialized, test did not expect to call // testcase.detach looks uninitialized, test did not expect to call
// DetachDisk // DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!") testcase.t.Errorf("Unexpected DetachDisk call!")
@ -316,20 +318,20 @@ func (testcase *testcase) DetachDisk(devicePath, instanceID string) error {
return errors.New("Unexpected DetachDisk call: wrong diskName") return errors.New("Unexpected DetachDisk call: wrong diskName")
} }
if expected.instanceID != instanceID { if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID) testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return errors.New("Unexpected DetachDisk call: wrong instanceID") return errors.New("Unexpected DetachDisk call: wrong nodeName")
} }
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", devicePath, instanceID, expected.ret) glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", devicePath, nodeName, expected.ret)
return expected.ret return expected.ret
} }
func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, error) { func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
expected := &testcase.diskIsAttached expected := &testcase.diskIsAttached
if expected.diskName == "" && expected.instanceID == "" { if expected.diskName == "" && expected.nodeName == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to // testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached // call DiskIsAttached
testcase.t.Errorf("Unexpected DiskIsAttached call!") testcase.t.Errorf("Unexpected DiskIsAttached call!")
@ -341,12 +343,12 @@ func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, err
return false, errors.New("Unexpected DiskIsAttached call: wrong diskName") return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
} }
if expected.instanceID != instanceID { if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID) testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return false, errors.New("Unexpected DiskIsAttached call: wrong instanceID") return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
} }
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, instanceID, expected.isAttached, expected.ret) glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)
return expected.isAttached, expected.ret return expected.isAttached, expected.ret
} }
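The test fakes above are table-driven: each case records at most one expected attachCall / detachCall / diskIsAttachedCall, and the fake cloud compares every argument, now including the NodeName, against that record. A trimmed-down sketch of the same idea, using hypothetical detachCall and fakeCloud types rather than the real test harness:

package main

import (
	"errors"
	"fmt"
)

type NodeName string

// detachCall records the single DetachDisk invocation a test expects,
// in the same spirit as the detachCall struct in the diff above.
type detachCall struct {
	diskName string
	nodeName NodeName
	ret      error
}

type fakeCloud struct{ expect detachCall }

// DetachDisk fails loudly if the arguments do not match the recorded
// expectation, so a test immediately reveals a hostname/NodeName mix-up.
func (f *fakeCloud) DetachDisk(diskName string, nodeName NodeName) error {
	if f.expect.diskName == "" && f.expect.nodeName == "" {
		return errors.New("unexpected DetachDisk call")
	}
	if f.expect.nodeName != nodeName {
		return fmt.Errorf("wrong nodeName: want %s, got %s", f.expect.nodeName, nodeName)
	}
	return f.expect.ret
}

func main() {
	cloud := &fakeCloud{expect: detachCall{diskName: "disk", nodeName: "instance"}}
	fmt.Println(cloud.DetachDisk("disk", "instance")) // <nil>
	fmt.Println(cloud.DetachDisk("disk", "other"))    // wrong nodeName
}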

View File

@ -370,7 +370,7 @@ func (fv *FakeVolume) TearDownAt(dir string) error {
return os.RemoveAll(dir) return os.RemoveAll(dir)
} }
func (fv *FakeVolume) Attach(spec *Spec, hostName string) (string, error) { func (fv *FakeVolume) Attach(spec *Spec, nodeName types.NodeName) (string, error) {
fv.Lock() fv.Lock()
defer fv.Unlock() defer fv.Unlock()
fv.AttachCallCount++ fv.AttachCallCount++
@ -416,7 +416,7 @@ func (fv *FakeVolume) GetMountDeviceCallCount() int {
return fv.MountDeviceCallCount return fv.MountDeviceCallCount
} }
func (fv *FakeVolume) Detach(deviceMountPath string, hostName string) error { func (fv *FakeVolume) Detach(deviceMountPath string, nodeName types.NodeName) error {
fv.Lock() fv.Lock()
defer fv.Unlock() defer fv.Unlock()
fv.DetachCallCount++ fv.DetachCallCount++
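FakeVolume bumps its call counters under its own lock so that assertions stay correct even when the code under test attaches concurrently. A self-contained sketch of that counting pattern, with a hypothetical fakeVolume type; only the lock/increment discipline is taken from the diff above.

package main

import (
	"fmt"
	"sync"
)

type NodeName string

// fakeVolume counts Attach calls under a mutex so tests can assert on the
// count even with concurrent callers, mirroring FakeVolume in pkg/volume/testing.
type fakeVolume struct {
	sync.Mutex
	attachCallCount int
}

func (fv *fakeVolume) Attach(nodeName NodeName) (string, error) {
	fv.Lock()
	defer fv.Unlock()
	fv.attachCallCount++
	return "/dev/fake", nil
}

func (fv *fakeVolume) GetAttachCallCount() int {
	fv.Lock()
	defer fv.Unlock()
	return fv.attachCallCount
}

func main() {
	fv := &fakeVolume{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); fv.Attach("node-1") }()
	}
	wg.Wait()
	fmt.Println(fv.GetAttachCallCount()) // 4
}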

View File

@ -101,7 +101,7 @@ type OperationExecutor interface {
// If the volume is not found or there is an error (fetching the node // If the volume is not found or there is an error (fetching the node
// object, for example) then an error is returned which triggers exponential // object, for example) then an error is returned which triggers exponential
// back off on retries. // back off on retries.
VerifyControllerAttachedVolume(volumeToMount VolumeToMount, nodeName string, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error VerifyControllerAttachedVolume(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// IsOperationPending returns true if an operation for the given volumeName and podName is pending, // IsOperationPending returns true if an operation for the given volumeName and podName is pending,
// otherwise it returns false // otherwise it returns false
@ -149,18 +149,18 @@ type ActualStateOfWorldAttacherUpdater interface {
// TODO: in the future, we should be able to remove the volumeName // TODO: in the future, we should be able to remove the volumeName
// argument to this method -- since it is used only for attachable // argument to this method -- since it is used only for attachable
// volumes. See issue 29695. // volumes. See issue 29695.
MarkVolumeAsAttached(volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName, devicePath string) error MarkVolumeAsAttached(volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error
// Marks the specified volume as detached from the specified node // Marks the specified volume as detached from the specified node
MarkVolumeAsDetached(volumeName api.UniqueVolumeName, nodeName string) MarkVolumeAsDetached(volumeName api.UniqueVolumeName, nodeName types.NodeName)
// Marks desire to detach the specified volume (remove the volume from the node's // Marks desire to detach the specified volume (remove the volume from the node's
// volumesToReportedAsAttached list) // volumesToReportedAsAttached list)
RemoveVolumeFromReportAsAttached(volumeName api.UniqueVolumeName, nodeName string) error RemoveVolumeFromReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName) error
// Unmarks the desire to detach for the specified volume (add the volume back to // Unmarks the desire to detach for the specified volume (add the volume back to
// the node's volumesToReportedAsAttached list) // the node's volumesToReportedAsAttached list)
AddVolumeToReportAsAttached(volumeName api.UniqueVolumeName, nodeName string) AddVolumeToReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName)
} }
// VolumeToAttach represents a volume that should be attached to a node. // VolumeToAttach represents a volume that should be attached to a node.
@ -175,7 +175,7 @@ type VolumeToAttach struct {
// NodeName is the identifier for the node that the volume should be // NodeName is the identifier for the node that the volume should be
// attached to. // attached to.
NodeName string NodeName types.NodeName
// scheduledPods is a map containing the set of pods that reference this // scheduledPods is a map containing the set of pods that reference this
// volume and are scheduled to the underlying node. The key in the map is // volume and are scheduled to the underlying node. The key in the map is
@ -234,7 +234,7 @@ type AttachedVolume struct {
VolumeSpec *volume.Spec VolumeSpec *volume.Spec
// NodeName is the identifier for the node that the volume is attached to. // NodeName is the identifier for the node that the volume is attached to.
NodeName string NodeName types.NodeName
// PluginIsAttachable indicates that the plugin for this volume implements // PluginIsAttachable indicates that the plugin for this volume implements
// the volume.Attacher interface // the volume.Attacher interface
@ -453,7 +453,7 @@ func (oe *operationExecutor) UnmountDevice(
func (oe *operationExecutor) VerifyControllerAttachedVolume( func (oe *operationExecutor) VerifyControllerAttachedVolume(
volumeToMount VolumeToMount, volumeToMount VolumeToMount,
nodeName string, nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
verifyControllerAttachedVolumeFunc, err := verifyControllerAttachedVolumeFunc, err :=
oe.generateVerifyControllerAttachedVolumeFunc(volumeToMount, nodeName, actualStateOfWorld) oe.generateVerifyControllerAttachedVolumeFunc(volumeToMount, nodeName, actualStateOfWorld)
@ -605,7 +605,7 @@ func (oe *operationExecutor) generateDetachVolumeFunc(
func (oe *operationExecutor) verifyVolumeIsSafeToDetach( func (oe *operationExecutor) verifyVolumeIsSafeToDetach(
volumeToDetach AttachedVolume) error { volumeToDetach AttachedVolume) error {
// Fetch current node object // Fetch current node object
node, fetchErr := oe.kubeClient.Core().Nodes().Get(volumeToDetach.NodeName) node, fetchErr := oe.kubeClient.Core().Nodes().Get(string(volumeToDetach.NodeName))
if fetchErr != nil { if fetchErr != nil {
if errors.IsNotFound(fetchErr) { if errors.IsNotFound(fetchErr) {
glog.Warningf("Node %q not found on API server. DetachVolume will skip safe to detach check.", glog.Warningf("Node %q not found on API server. DetachVolume will skip safe to detach check.",
@ -1001,7 +1001,7 @@ func (oe *operationExecutor) generateUnmountDeviceFunc(
func (oe *operationExecutor) generateVerifyControllerAttachedVolumeFunc( func (oe *operationExecutor) generateVerifyControllerAttachedVolumeFunc(
volumeToMount VolumeToMount, volumeToMount VolumeToMount,
nodeName string, nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) {
return func() error { return func() error {
if !volumeToMount.PluginIsAttachable { if !volumeToMount.PluginIsAttachable {
@ -1040,7 +1040,7 @@ func (oe *operationExecutor) generateVerifyControllerAttachedVolumeFunc(
} }
// Fetch current node object // Fetch current node object
node, fetchErr := oe.kubeClient.Core().Nodes().Get(nodeName) node, fetchErr := oe.kubeClient.Core().Nodes().Get(string(nodeName))
if fetchErr != nil { if fetchErr != nil {
// On failure, return error. Caller will log and retry. // On failure, return error. Caller will log and retry.
return fmt.Errorf( return fmt.Errorf(
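Note how the strongly-typed nodeName has to be converted back with string(nodeName) at the client boundary, because Nodes().Get still takes a plain string. A small sketch of that boundary, assuming a hypothetical nodeGetter interface and fakeNodes map in place of the real clientset:

package main

import "fmt"

type NodeName string

// nodeGetter stands in for the typed client's Nodes().Get, which still
// takes a plain string; the explicit string(nodeName) conversion marks
// every place where the strong type leaves the volume code.
type nodeGetter interface {
	Get(name string) (string, error)
}

type fakeNodes map[string]string

func (f fakeNodes) Get(name string) (string, error) {
	if v, ok := f[name]; ok {
		return v, nil
	}
	return "", fmt.Errorf("node %q not found", name)
}

func verifyAttached(nodes nodeGetter, nodeName NodeName, volume string) error {
	// The cast is deliberate: callers cannot silently pass a hostname here.
	node, err := nodes.Get(string(nodeName))
	if err != nil {
		return err
	}
	fmt.Printf("checking %s on node object %q\n", volume, node)
	return nil
}

func main() {
	nodes := fakeNodes{"node-1": "node-1-object"}
	fmt.Println(verifyAttached(nodes, NodeName("node-1"), "vol-1"))
}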

View File

@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/types"
) )
// Volume represents a directory used by pods or hosts on a node. All method // Volume represents a directory used by pods or hosts on a node. All method
@ -140,10 +141,10 @@ type Deleter interface {
// Attacher can attach a volume to a node. // Attacher can attach a volume to a node.
type Attacher interface { type Attacher interface {
// Attaches the volume specified by the given spec to the given host. // Attaches the volume specified by the given spec to the node with the given Name.
// On success, returns the device path where the device was attached on the // On success, returns the device path where the device was attached on the
// node. // node.
Attach(spec *Spec, hostName string) (string, error) Attach(spec *Spec, nodeName types.NodeName) (string, error)
// WaitForAttach blocks until the device is attached to this // WaitForAttach blocks until the device is attached to this
// node. If it successfully attaches, the path to the device // node. If it successfully attaches, the path to the device
@ -163,8 +164,8 @@ type Attacher interface {
// Detacher can detach a volume from a node. // Detacher can detach a volume from a node.
type Detacher interface { type Detacher interface {
// Detach the given device from the given host. // Detach the given device from the node with the given Name.
Detach(deviceName, hostName string) error Detach(deviceName string, nodeName types.NodeName) error
// WaitForDetach blocks until the device is detached from this // WaitForDetach blocks until the device is detached from this
// node. If the device does not detach within the given timeout // node. If the device does not detach within the given timeout
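With this change both halves of the pair, Attacher.Attach and Detacher.Detach, key on types.NodeName. The sketch below restates just the signatures that changed; Spec and noopPlugin are placeholders rather than the real volume package types, and only illustrate that one plugin type can satisfy both reshaped interfaces.

package main

import "fmt"

type NodeName string
type Spec struct{ Name string }

// Local restatements of the volume.Attacher / volume.Detacher methods
// touched by the diff above (only the signatures that changed).
type Attacher interface {
	Attach(spec *Spec, nodeName NodeName) (string, error)
}
type Detacher interface {
	Detach(deviceName string, nodeName NodeName) error
}

// noopPlugin is a hypothetical plugin used only to show that one type
// can satisfy both interfaces with the new NodeName parameter.
type noopPlugin struct{}

func (noopPlugin) Attach(spec *Spec, nodeName NodeName) (string, error) {
	return fmt.Sprintf("/dev/%s-on-%s", spec.Name, nodeName), nil
}
func (noopPlugin) Detach(deviceName string, nodeName NodeName) error {
	fmt.Printf("detached %s from %s\n", deviceName, nodeName)
	return nil
}

func main() {
	var a Attacher = noopPlugin{}
	var d Detacher = noopPlugin{}
	dev, _ := a.Attach(&Spec{Name: "pd-1"}, "node-1")
	fmt.Println(dev)
	_ = d.Detach("pd-1", "node-1")
}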

View File

@ -24,6 +24,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
@ -60,21 +61,21 @@ func (plugin *vsphereVolumePlugin) NewAttacher() (volume.Attacher, error) {
// Callers are responsible for retrying on failure. // Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and // Callers are responsible for thread safety between concurrent attach and
// detach operations. // detach operations.
func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, hostName string) (string, error) { func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, _, err := getVolumeSource(spec) volumeSource, _, err := getVolumeSource(spec)
if err != nil { if err != nil {
return "", err return "", err
} }
glog.V(4).Infof("vSphere: Attach disk called for host %s", hostName) glog.V(4).Infof("vSphere: Attach disk called for node %s", nodeName)
// Keeps concurrent attach operations to same host atomic // Keeps concurrent attach operations to same host atomic
attachdetachMutex.LockKey(hostName) attachdetachMutex.LockKey(string(nodeName))
defer attachdetachMutex.UnlockKey(hostName) defer attachdetachMutex.UnlockKey(string(nodeName))
// vsphereCloud.AttachDisk checks if disk is already attached to host and // vsphereCloud.AttachDisk checks if disk is already attached to host and
// succeeds in that case, so no need to do that separately. // succeeds in that case, so no need to do that separately.
_, diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, hostName) _, diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, nodeName)
if err != nil { if err != nil {
glog.Errorf("Error attaching volume %q: %+v", volumeSource.VolumePath, err) glog.Errorf("Error attaching volume %q: %+v", volumeSource.VolumePath, err)
return "", err return "", err
@ -190,27 +191,27 @@ func (plugin *vsphereVolumePlugin) NewDetacher() (volume.Detacher, error) {
}, nil }, nil
} }
// Detach the given device from the given host. // Detach the given device from the given node.
func (detacher *vsphereVMDKDetacher) Detach(deviceMountPath string, hostName string) error { func (detacher *vsphereVMDKDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
volPath := getVolPathfromDeviceMountPath(deviceMountPath) volPath := getVolPathfromDeviceMountPath(deviceMountPath)
attached, err := detacher.vsphereVolumes.DiskIsAttached(volPath, hostName) attached, err := detacher.vsphereVolumes.DiskIsAttached(volPath, nodeName)
if err != nil { if err != nil {
// Log error and continue with detach // Log error and continue with detach
glog.Errorf( glog.Errorf(
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", "Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
volPath, hostName, err) volPath, nodeName, err)
} }
if err == nil && !attached { if err == nil && !attached {
// Volume is already detached from node. // Volume is already detached from node.
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volPath, hostName) glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volPath, nodeName)
return nil return nil
} }
attachdetachMutex.LockKey(hostName) attachdetachMutex.LockKey(string(nodeName))
defer attachdetachMutex.UnlockKey(hostName) defer attachdetachMutex.UnlockKey(string(nodeName))
if err := detacher.vsphereVolumes.DetachDisk(volPath, hostName); err != nil { if err := detacher.vsphereVolumes.DetachDisk(volPath, nodeName); err != nil {
glog.Errorf("Error detaching volume %q: %v", volPath, err) glog.Errorf("Error detaching volume %q: %v", volPath, err)
return err return err
} }
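The vSphere attacher serializes attach/detach per node by locking a keyed mutex on string(nodeName), so operations for the same node run one at a time while different nodes proceed in parallel. The keyMutex below is a simplified, hypothetical stand-in for pkg/util/keymutex; the real implementation differs in detail, so treat this as an illustration of the pattern only.

package main

import (
	"fmt"
	"sync"
)

type NodeName string

// keyMutex keeps one mutex per key: same key serializes, different keys don't.
type keyMutex struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func newKeyMutex() *keyMutex { return &keyMutex{locks: map[string]*sync.Mutex{}} }

func (k *keyMutex) LockKey(key string) {
	k.mu.Lock()
	m, ok := k.locks[key]
	if !ok {
		m = &sync.Mutex{}
		k.locks[key] = m
	}
	k.mu.Unlock()
	m.Lock()
}

func (k *keyMutex) UnlockKey(key string) {
	k.mu.Lock()
	m := k.locks[key]
	k.mu.Unlock()
	m.Unlock()
}

func attach(km *keyMutex, nodeName NodeName, volPath string) {
	// The key is the node name; the cast makes the NodeName -> string
	// boundary explicit, as in the vSphere attacher above.
	km.LockKey(string(nodeName))
	defer km.UnlockKey(string(nodeName))
	fmt.Printf("attaching %s to %s\n", volPath, nodeName)
}

func main() {
	km := newKeyMutex()
	var wg sync.WaitGroup
	for _, n := range []NodeName{"node-1", "node-1", "node-2"} {
		wg.Add(1)
		go func(n NodeName) { defer wg.Done(); attach(km, n, "[local] volumes/test") }(n)
	}
	wg.Wait()
}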

View File

@ -26,6 +26,7 @@ import (
volumetest "k8s.io/kubernetes/pkg/volume/testing" volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
) )
func TestGetDeviceName_Volume(t *testing.T) { func TestGetDeviceName_Volume(t *testing.T) {
@ -75,7 +76,7 @@ type testcase struct {
func TestAttachDetach(t *testing.T) { func TestAttachDetach(t *testing.T) {
uuid := "00000000000000" uuid := "00000000000000"
diskName := "[local] volumes/test" diskName := "[local] volumes/test"
hostName := "host" nodeName := types.NodeName("host")
spec := createVolSpec(diskName) spec := createVolSpec(diskName)
attachError := errors.New("Fake attach error") attachError := errors.New("Fake attach error")
detachError := errors.New("Fake detach error") detachError := errors.New("Fake detach error")
@ -84,10 +85,10 @@ func TestAttachDetach(t *testing.T) {
// Successful Attach call // Successful Attach call
{ {
name: "Attach_Positive", name: "Attach_Positive",
attach: attachCall{diskName, hostName, uuid, nil}, attach: attachCall{diskName, nodeName, uuid, nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
return attacher.Attach(spec, hostName) return attacher.Attach(spec, nodeName)
}, },
expectedDevice: "/dev/disk/by-id/wwn-0x" + uuid, expectedDevice: "/dev/disk/by-id/wwn-0x" + uuid,
}, },
@ -95,10 +96,10 @@ func TestAttachDetach(t *testing.T) {
// Attach call fails // Attach call fails
{ {
name: "Attach_Negative", name: "Attach_Negative",
attach: attachCall{diskName, hostName, "", attachError}, attach: attachCall{diskName, nodeName, "", attachError},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase) attacher := newAttacher(testcase)
return attacher.Attach(spec, hostName) return attacher.Attach(spec, nodeName)
}, },
expectedError: attachError, expectedError: attachError,
}, },
@ -106,43 +107,43 @@ func TestAttachDetach(t *testing.T) {
// Detach succeeds // Detach succeeds
{ {
name: "Detach_Positive", name: "Detach_Positive",
diskIsAttached: diskIsAttachedCall{diskName, hostName, true, nil}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
detach: detachCall{diskName, hostName, nil}, detach: detachCall{diskName, nodeName, nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, hostName) return "", detacher.Detach(diskName, nodeName)
}, },
}, },
// Disk is already detached // Disk is already detached
{ {
name: "Detach_Positive_AlreadyDetached", name: "Detach_Positive_AlreadyDetached",
diskIsAttached: diskIsAttachedCall{diskName, hostName, false, nil}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, hostName) return "", detacher.Detach(diskName, nodeName)
}, },
}, },
// Detach succeeds when DiskIsAttached fails // Detach succeeds when DiskIsAttached fails
{ {
name: "Detach_Positive_CheckFails", name: "Detach_Positive_CheckFails",
diskIsAttached: diskIsAttachedCall{diskName, hostName, false, diskCheckError}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, hostName, nil}, detach: detachCall{diskName, nodeName, nil},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, hostName) return "", detacher.Detach(diskName, nodeName)
}, },
}, },
// Detach fails // Detach fails
{ {
name: "Detach_Negative", name: "Detach_Negative",
diskIsAttached: diskIsAttachedCall{diskName, hostName, false, diskCheckError}, diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, hostName, detachError}, detach: detachCall{diskName, nodeName, detachError},
test: func(testcase *testcase) (string, error) { test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase) detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, hostName) return "", detacher.Detach(diskName, nodeName)
}, },
expectedError: detachError, expectedError: detachError,
}, },
@ -214,27 +215,28 @@ func createPVSpec(name string) *volume.Spec {
type attachCall struct { type attachCall struct {
diskName string diskName string
hostName string nodeName types.NodeName
retDeviceUUID string retDeviceUUID string
ret error ret error
} }
type detachCall struct { type detachCall struct {
diskName string diskName string
hostName string nodeName types.NodeName
ret error ret error
} }
type diskIsAttachedCall struct { type diskIsAttachedCall struct {
diskName, hostName string diskName string
isAttached bool nodeName types.NodeName
ret error isAttached bool
ret error
} }
func (testcase *testcase) AttachDisk(diskName string, hostName string) (string, string, error) { func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName) (string, string, error) {
expected := &testcase.attach expected := &testcase.attach
if expected.diskName == "" && expected.hostName == "" { if expected.diskName == "" && expected.nodeName == "" {
// testcase.attach looks uninitialized, test did not expect to call // testcase.attach looks uninitialized, test did not expect to call
// AttachDisk // AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!") testcase.t.Errorf("Unexpected AttachDisk call!")
@ -246,20 +248,20 @@ func (testcase *testcase) AttachDisk(diskName string, hostName string) (string,
return "", "", errors.New("Unexpected AttachDisk call: wrong diskName") return "", "", errors.New("Unexpected AttachDisk call: wrong diskName")
} }
if expected.hostName != hostName { if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected AttachDisk call: expected hostName %s, got %s", expected.hostName, hostName) testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return "", "", errors.New("Unexpected AttachDisk call: wrong hostName") return "", "", errors.New("Unexpected AttachDisk call: wrong nodeName")
} }
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, hostName, expected.retDeviceUUID, expected.ret) glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceUUID, expected.ret)
return "", expected.retDeviceUUID, expected.ret return "", expected.retDeviceUUID, expected.ret
} }
func (testcase *testcase) DetachDisk(diskName string, hostName string) error { func (testcase *testcase) DetachDisk(diskName string, nodeName types.NodeName) error {
expected := &testcase.detach expected := &testcase.detach
if expected.diskName == "" && expected.hostName == "" { if expected.diskName == "" && expected.nodeName == "" {
// testcase.detach looks uninitialized, test did not expect to call // testcase.detach looks uninitialized, test did not expect to call
// DetachDisk // DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!") testcase.t.Errorf("Unexpected DetachDisk call!")
@ -271,20 +273,20 @@ func (testcase *testcase) DetachDisk(diskName string, hostName string) error {
return errors.New("Unexpected DetachDisk call: wrong diskName") return errors.New("Unexpected DetachDisk call: wrong diskName")
} }
if expected.hostName != hostName { if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DetachDisk call: expected hostname %s, got %s", expected.hostName, hostName) testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return errors.New("Unexpected DetachDisk call: wrong hostname") return errors.New("Unexpected DetachDisk call: wrong nodeName")
} }
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", diskName, hostName, expected.ret) glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", diskName, nodeName, expected.ret)
return expected.ret return expected.ret
} }
func (testcase *testcase) DiskIsAttached(diskName, hostName string) (bool, error) { func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
expected := &testcase.diskIsAttached expected := &testcase.diskIsAttached
if expected.diskName == "" && expected.hostName == "" { if expected.diskName == "" && expected.nodeName == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to // testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached // call DiskIsAttached
testcase.t.Errorf("Unexpected DiskIsAttached call!") testcase.t.Errorf("Unexpected DiskIsAttached call!")
@ -296,12 +298,12 @@ func (testcase *testcase) DiskIsAttached(diskName, hostName string) (bool, error
return false, errors.New("Unexpected DiskIsAttached call: wrong diskName") return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
} }
if expected.hostName != hostName { if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected hostName %s, got %s", expected.hostName, hostName) testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return false, errors.New("Unexpected DiskIsAttached call: wrong hostName") return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
} }
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, hostName, expected.isAttached, expected.ret) glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)
return expected.isAttached, expected.ret return expected.isAttached, expected.ret
} }

View File

@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/admission" "k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws" "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/types"
) )
type mockVolumes struct { type mockVolumes struct {
@ -33,11 +34,11 @@ type mockVolumes struct {
var _ aws.Volumes = &mockVolumes{} var _ aws.Volumes = &mockVolumes{}
func (v *mockVolumes) AttachDisk(diskName string, instanceName string, readOnly bool) (string, error) { func (v *mockVolumes) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) (string, error) {
return "", fmt.Errorf("not implemented") return "", fmt.Errorf("not implemented")
} }
func (v *mockVolumes) DetachDisk(diskName string, instanceName string) (string, error) { func (v *mockVolumes) DetachDisk(diskName string, nodeName types.NodeName) (string, error) {
return "", fmt.Errorf("not implemented") return "", fmt.Errorf("not implemented")
} }
@ -57,7 +58,7 @@ func (c *mockVolumes) GetDiskPath(volumeName string) (string, error) {
return "", fmt.Errorf("not implemented") return "", fmt.Errorf("not implemented")
} }
func (c *mockVolumes) DiskIsAttached(volumeName, instanceID string) (bool, error) { func (c *mockVolumes) DiskIsAttached(volumeName string, nodeName types.NodeName) (bool, error) {
return false, fmt.Errorf("not implemented") return false, fmt.Errorf("not implemented")
} }
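The blank-identifier assignment var _ aws.Volumes = &mockVolumes{} above is what turns a signature change such as string -> types.NodeName into a compile error in the mock instead of a runtime test failure. A pared-down sketch of the same assertion, with a hypothetical Volumes interface that keeps only two of the methods shown:

package main

import "fmt"

type NodeName string

// Volumes is a pared-down, hypothetical version of the aws.Volumes
// methods shown above, just enough to demonstrate the assertion.
type Volumes interface {
	AttachDisk(diskName string, nodeName NodeName, readOnly bool) (string, error)
	DiskIsAttached(diskName string, nodeName NodeName) (bool, error)
}

type mockVolumes struct{}

// The blank-identifier assignment below is the same trick as
// "var _ aws.Volumes = &mockVolumes{}": if a signature drifts, the mock
// fails to compile rather than failing later at test runtime.
var _ Volumes = &mockVolumes{}

func (m *mockVolumes) AttachDisk(diskName string, nodeName NodeName, readOnly bool) (string, error) {
	return "", fmt.Errorf("not implemented")
}

func (m *mockVolumes) DiskIsAttached(diskName string, nodeName NodeName) (bool, error) {
	return false, fmt.Errorf("not implemented")
}

func main() {
	var v Volumes = &mockVolumes{}
	_, err := v.AttachDisk("vol-1", "node-1", false)
	fmt.Println(err)
}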

View File

@ -37,6 +37,7 @@ import (
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
) )
@ -54,8 +55,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
var ( var (
podClient client.PodInterface podClient client.PodInterface
nodeClient client.NodeInterface nodeClient client.NodeInterface
host0Name string host0Name types.NodeName
host1Name string host1Name types.NodeName
) )
f := framework.NewDefaultFramework("pod-disks") f := framework.NewDefaultFramework("pod-disks")
@ -68,8 +69,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes") Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")
host0Name = nodes.Items[0].ObjectMeta.Name host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name)
host1Name = nodes.Items[1].ObjectMeta.Name host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)
mathrand.Seed(time.Now().UTC().UnixNano()) mathrand.Seed(time.Now().UTC().UnixNano())
}) })
@ -91,7 +92,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("cleaning up PD-RW test environment") By("cleaning up PD-RW test environment")
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)) podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)) podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
detachAndDeletePDs(diskName, []string{host0Name, host1Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}() }()
By("submitting host0Pod to kubernetes") By("submitting host0Pod to kubernetes")
@ -155,7 +156,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("cleaning up PD-RW test environment") By("cleaning up PD-RW test environment")
podClient.Delete(host0Pod.Name, &api.DeleteOptions{}) podClient.Delete(host0Pod.Name, &api.DeleteOptions{})
podClient.Delete(host1Pod.Name, &api.DeleteOptions{}) podClient.Delete(host1Pod.Name, &api.DeleteOptions{})
detachAndDeletePDs(diskName, []string{host0Name, host1Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}() }()
By("submitting host0Pod to kubernetes") By("submitting host0Pod to kubernetes")
@ -220,7 +221,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)) podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)) podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)) podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))
detachAndDeletePDs(diskName, []string{host0Name, host1Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}() }()
By("submitting rwPod to ensure PD is formatted") By("submitting rwPod to ensure PD is formatted")
@ -272,7 +273,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
podClient.Delete(rwPod.Name, &api.DeleteOptions{}) podClient.Delete(rwPod.Name, &api.DeleteOptions{})
podClient.Delete(host0ROPod.Name, &api.DeleteOptions{}) podClient.Delete(host0ROPod.Name, &api.DeleteOptions{})
podClient.Delete(host1ROPod.Name, &api.DeleteOptions{}) podClient.Delete(host1ROPod.Name, &api.DeleteOptions{})
detachAndDeletePDs(diskName, []string{host0Name, host1Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}() }()
By("submitting rwPod to ensure PD is formatted") By("submitting rwPod to ensure PD is formatted")
@ -322,7 +323,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
if host0Pod != nil { if host0Pod != nil {
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)) podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
} }
detachAndDeletePDs(diskName, []string{host0Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name})
}() }()
fileAndContentToVerify := make(map[string]string) fileAndContentToVerify := make(map[string]string)
@ -377,8 +378,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
if host0Pod != nil { if host0Pod != nil {
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)) podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
} }
detachAndDeletePDs(disk1Name, []string{host0Name}) detachAndDeletePDs(disk1Name, []types.NodeName{host0Name})
detachAndDeletePDs(disk2Name, []string{host0Name}) detachAndDeletePDs(disk2Name, []types.NodeName{host0Name})
}() }()
containerName := "mycontainer" containerName := "mycontainer"
@ -535,16 +536,14 @@ func deletePD(pdName string) error {
} }
} }
func detachPD(hostName, pdName string) error { func detachPD(nodeName types.NodeName, pdName string) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
instanceName := strings.Split(hostName, ".")[0]
gceCloud, err := getGCECloud() gceCloud, err := getGCECloud()
if err != nil { if err != nil {
return err return err
} }
err = gceCloud.DetachDisk(pdName, instanceName) err = gceCloud.DetachDisk(pdName, nodeName)
if err != nil { if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && strings.Contains(gerr.Message, "Invalid value for field 'disk'") { if gerr, ok := err.(*googleapi.Error); ok && strings.Contains(gerr.Message, "Invalid value for field 'disk'") {
// PD already detached, ignore error. // PD already detached, ignore error.
@ -575,7 +574,7 @@ func detachPD(hostName, pdName string) error {
} }
} }
func testPDPod(diskNames []string, targetHost string, readOnly bool, numContainers int) *api.Pod { func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, numContainers int) *api.Pod {
containers := make([]api.Container, numContainers) containers := make([]api.Container, numContainers)
for i := range containers { for i := range containers {
containers[i].Name = "mycontainer" containers[i].Name = "mycontainer"
@ -608,7 +607,7 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine
}, },
Spec: api.PodSpec{ Spec: api.PodSpec{
Containers: containers, Containers: containers,
NodeName: targetHost, NodeName: string(targetNode),
}, },
} }
@ -644,31 +643,31 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine
} }
// Waits for specified PD to detach from specified hostName // Waits for specified PD to detach from specified hostName
func waitForPDDetach(diskName, hostName string) error { func waitForPDDetach(diskName string, nodeName types.NodeName) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, hostName) framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
gceCloud, err := getGCECloud() gceCloud, err := getGCECloud()
if err != nil { if err != nil {
return err return err
} }
for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) { for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) {
diskAttached, err := gceCloud.DiskIsAttached(diskName, hostName) diskAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
if err != nil { if err != nil {
framework.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, hostName, err) framework.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, nodeName, err)
return err return err
} }
if !diskAttached { if !diskAttached {
// Specified disk does not appear to be attached to specified node // Specified disk does not appear to be attached to specified node
framework.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, hostName) framework.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, nodeName)
return nil return nil
} }
framework.Logf("Waiting for GCE PD %q to detach from %q.", diskName, hostName) framework.Logf("Waiting for GCE PD %q to detach from %q.", diskName, nodeName)
} }
return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, hostName, gcePDDetachTimeout) return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, nodeName, gcePDDetachTimeout)
} }
return nil return nil
@ -684,7 +683,7 @@ func getGCECloud() (*gcecloud.GCECloud, error) {
return gceCloud, nil return gceCloud, nil
} }
func detachAndDeletePDs(diskName string, hosts []string) { func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
for _, host := range hosts { for _, host := range hosts {
framework.Logf("Detaching GCE PD %q from node %q.", diskName, host) framework.Logf("Detaching GCE PD %q from node %q.", diskName, host)
detachPD(host, diskName) detachPD(host, diskName)
@ -697,7 +696,8 @@ func detachAndDeletePDs(diskName string, hosts []string) {
func waitForPDInVolumesInUse( func waitForPDInVolumesInUse(
nodeClient client.NodeInterface, nodeClient client.NodeInterface,
diskName, nodeName string, diskName string,
nodeName types.NodeName,
timeout time.Duration, timeout time.Duration,
shouldExist bool) error { shouldExist bool) error {
logStr := "to contain" logStr := "to contain"
@ -708,7 +708,7 @@ func waitForPDInVolumesInUse(
"Waiting for node %s's VolumesInUse Status %s PD %q", "Waiting for node %s's VolumesInUse Status %s PD %q",
nodeName, logStr, diskName) nodeName, logStr, diskName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) {
nodeObj, err := nodeClient.Get(nodeName) nodeObj, err := nodeClient.Get(string(nodeName))
if err != nil || nodeObj == nil { if err != nil || nodeObj == nil {
framework.Logf( framework.Logf(
"Failed to fetch node object %q from API server. err=%v", "Failed to fetch node object %q from API server. err=%v",