Merge pull request #29048 from justinsb/volumes_nodename_not_hostname
Automatic merge from submit-queue. Use strongly-typed types.NodeName for a node name. We had another bug where we confused the hostname with the NodeName. Also, if we want to use different values for Node.Name (an important step toward making installation easier), we need to keep better control over this. A tedious but mechanical commit, therefore, changing all uses of the node name to types.NodeName.
commit 1854bdcb0c
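The essence of the change, reduced to a minimal stand-alone sketch (the local NodeName definition stands in for k8s.io/kubernetes/pkg/types, the hostname value is a hypothetical example, and the helper mirrors the mapNodeNameToPrivateDNSName function added to the AWS provider in the diff below):

package main

import "fmt"

// NodeName is the strongly-typed node name this change threads through the code base.
// In the real tree it is defined in k8s.io/kubernetes/pkg/types; it is redefined locally
// here only so the sketch is self-contained.
type NodeName string

// mapNodeNameToPrivateDNSName mirrors the AWS helper added in this diff: converting
// between a NodeName and a cloud-specific identifier is an explicit string cast,
// so call sites can no longer silently mix up hostnames and node names.
func mapNodeNameToPrivateDNSName(nodeName NodeName) string {
	return string(nodeName)
}

func main() {
	hostName := "ip-10-0-0-1.ec2.internal" // hypothetical value, e.g. from os.Hostname()
	nodeName := NodeName(hostName)         // the conversion is now explicit and reviewable
	fmt.Println(mapNodeNameToPrivateDNSName(nodeName))
}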
@@ -16549,7 +16549,7 @@
},
"host": {
"type": "string",
"description": "Host name on which the event is generated."
"description": "Node name on which the event is generated."
}
}
},
@@ -20,6 +20,7 @@ import (
"fmt"
"os"

"github.com/golang/glog"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/api"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/pkg/apis/certificates"

@@ -29,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/kubelet/util/csr"
"k8s.io/kubernetes/pkg/types"
certutil "k8s.io/kubernetes/pkg/util/cert"
)

@@ -37,11 +39,16 @@ func PerformTLSBootstrap(s *kubeadmapi.KubeadmConfig, apiEndpoint string, caCert
// TODO(phase1+) try all the api servers until we find one that works
bareClientConfig := kubeadmutil.CreateBasicClientConfig("kubernetes", apiEndpoint, caCert)

nodeName, err := os.Hostname()
hostName, err := os.Hostname()
if err != nil {
return nil, fmt.Errorf("<node/csr> failed to get node hostname [%v]", err)
}

// TODO: hostname == nodename doesn't hold on all clouds (AWS).
// But we don't have a cloudprovider, so we're stuck.
glog.Errorf("assuming that hostname is the same as NodeName")
nodeName := types.NodeName(hostName)

bootstrapClientConfig, err := clientcmd.NewDefaultClientConfig(
*kubeadmutil.MakeClientConfigWithToken(
bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), s.Secrets.BearerToken,
@@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/kubelet/util/csr"
"k8s.io/kubernetes/pkg/types"
certutil "k8s.io/kubernetes/pkg/util/cert"
)

@@ -42,7 +43,7 @@ const (
// The kubeconfig at bootstrapPath is used to request a client certificate from the API server.
// On success, a kubeconfig file referencing the generated key and obtained certificate is written to kubeconfigPath.
// The certificate and key file are stored in certDir.
func bootstrapClientCert(kubeconfigPath string, bootstrapPath string, certDir string, nodeName string) error {
func bootstrapClientCert(kubeconfigPath string, bootstrapPath string, certDir string, nodeName types.NodeName) error {
// Short-circuit if the kubeconfig file already exists.
// TODO: inspect the kubeconfig, ensure a rest client can be built from it, verify client cert expiration, etc.
_, err := os.Stat(kubeconfigPath)
@@ -61,6 +61,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/server"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
certutil "k8s.io/kubernetes/pkg/util/cert"
utilconfig "k8s.io/kubernetes/pkg/util/config"
"k8s.io/kubernetes/pkg/util/configz"

@@ -169,7 +170,7 @@ func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletD
}

configmap, err := func() (*api.ConfigMap, error) {
var nodename string
var nodename types.NodeName
hostname := nodeutil.GetHostname(s.HostnameOverride)

if kubeDeps != nil && kubeDeps.Cloud != nil {

@@ -460,9 +461,9 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {

// getNodeName returns the node name according to the cloud provider
// if cloud provider is specified. Otherwise, returns the hostname of the node.
func getNodeName(cloud cloudprovider.Interface, hostname string) (string, error) {
func getNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName, error) {
if cloud == nil {
return hostname, nil
return types.NodeName(hostname), nil
}

instances, ok := cloud.Instances()

@@ -607,7 +608,7 @@ func RunKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet
}

eventBroadcaster := record.NewBroadcaster()
kubeDeps.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: nodeName})
kubeDeps.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: string(nodeName)})
eventBroadcaster.StartLogging(glog.V(3).Infof)
if kubeDeps.EventClient != nil {
glog.V(4).Infof("Sending events to api server.")
@@ -44,6 +44,7 @@ import (
kconfig "k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
)

// TODO(jdef): passing the value of envContainerID to all docker containers instantiated
@@ -5008,7 +5008,7 @@ The resulting set of endpoints can be viewed as:<br>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">host</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Host name on which the event is generated.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Node name on which the event is generated.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
<td class="tableblock halign-left valign-top"></td>
@@ -2568,7 +2568,7 @@ type SerializedReference struct {
type EventSource struct {
// Component from which the event is generated.
Component string `json:"component,omitempty"`
// Host name on which the event is generated.
// Node name on which the event is generated.
Host string `json:"host,omitempty"`
}

@@ -769,7 +769,7 @@ message EventSource {
// Component from which the event is generated.
optional string component = 1;

// Host name on which the event is generated.
// Node name on which the event is generated.
optional string host = 2;
}

@@ -3017,7 +3017,7 @@ type SerializedReference struct {
type EventSource struct {
// Component from which the event is generated.
Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
// Host name on which the event is generated.
// Node name on which the event is generated.
Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}

@@ -477,7 +477,7 @@ func (EventList) SwaggerDoc() map[string]string {
var map_EventSource = map[string]string{
"": "EventSource contains information for an event.",
"component": "Component from which the event is generated.",
"host": "Host name on which the event is generated.",
"host": "Node name on which the event is generated.",
}

func (EventSource) SwaggerDoc() map[string]string {
@@ -22,6 +22,7 @@ import (
"strings"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types"
)

// Interface is an abstract, pluggable interface for cloud providers.

@@ -63,7 +64,7 @@ func GetLoadBalancerName(service *api.Service) string {
return ret
}

func GetInstanceProviderID(cloud Interface, nodeName string) (string, error) {
func GetInstanceProviderID(cloud Interface, nodeName types.NodeName) (string, error) {
instances, ok := cloud.Instances()
if !ok {
return "", fmt.Errorf("failed to get instances from cloud provider")

@@ -86,11 +87,11 @@ type LoadBalancer interface {
// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
// Implementations must treat the *api.Service parameter as read-only and not modify it.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
EnsureLoadBalancer(clusterName string, service *api.Service, hosts []string) (*api.LoadBalancerStatus, error)
EnsureLoadBalancer(clusterName string, service *api.Service, nodeNames []string) (*api.LoadBalancerStatus, error)
// UpdateLoadBalancer updates hosts under the specified load balancer.
// Implementations must treat the *api.Service parameter as read-only and not modify it.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
UpdateLoadBalancer(clusterName string, service *api.Service, hosts []string) error
UpdateLoadBalancer(clusterName string, service *api.Service, nodeNames []string) error
// EnsureLoadBalancerDeleted deletes the specified load balancer if it
// exists, returning nil if the load balancer specified either didn't exist or
// was successfully deleted.

@@ -108,22 +109,22 @@ type Instances interface {
// TODO(roberthbailey): This currently is only used in such a way that it
// returns the address of the calling instance. We should do a rename to
// make this clearer.
NodeAddresses(name string) ([]api.NodeAddress, error)
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
NodeAddresses(name types.NodeName) ([]api.NodeAddress, error)
// ExternalID returns the cloud provider ID of the node with the specified NodeName.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
ExternalID(name string) (string, error)
// InstanceID returns the cloud provider ID of the specified instance.
InstanceID(name string) (string, error)
ExternalID(nodeName types.NodeName) (string, error)
// InstanceID returns the cloud provider ID of the node with the specified NodeName.
InstanceID(nodeName types.NodeName) (string, error)
// InstanceType returns the type of the specified instance.
InstanceType(name string) (string, error)
InstanceType(name types.NodeName) (string, error)
// List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn)
List(filter string) ([]string, error)
List(filter string) ([]types.NodeName, error)
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
AddSSHKeyToAllInstances(user string, keyData []byte) error
// CurrentNodeName returns the name of the node we are currently running on
// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
CurrentNodeName(hostname string) (string, error)
CurrentNodeName(hostname string) (types.NodeName, error)
}

// Route is a representation of an advanced routing rule.
@@ -131,9 +132,8 @@ type Route struct {
// Name is the name of the routing rule in the cloud-provider.
// It will be ignored in a Create (although nameHint may influence it)
Name string
// TargetInstance is the name of the instance as specified in routing rules
// for the cloud-provider (in gce: the Instance Name).
TargetInstance string
// TargetNode is the NodeName of the target instance.
TargetNode types.NodeName
// DestinationCIDR is the CIDR format IP range that this routing rule
// applies to.
DestinationCIDR string

@@ -296,14 +296,14 @@ type VolumeOptions struct {
// Volumes is an interface for managing cloud-provisioned volumes
// TODO: Allow other clouds to implement this
type Volumes interface {
// Attach the disk to the specified instance
// instanceName can be empty to mean "the instance on which we are running"
// Attach the disk to the node with the specified NodeName
// nodeName can be empty to mean "the instance on which we are running"
// Returns the device (e.g. /dev/xvdf) where we attached the volume
AttachDisk(diskName string, instanceName string, readOnly bool) (string, error)
// Detach the disk from the specified instance
// instanceName can be empty to mean "the instance on which we are running"
AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) (string, error)
// Detach the disk from the node with the specified NodeName
// nodeName can be empty to mean "the instance on which we are running"
// Returns the device where the volume was attached
DetachDisk(diskName string, instanceName string) (string, error)
DetachDisk(diskName string, nodeName types.NodeName) (string, error)

// Create a volume with the specified options
CreateDisk(volumeOptions *VolumeOptions) (volumeName string, err error)

@@ -319,8 +319,8 @@ type Volumes interface {
// return the device path where the volume is attached
GetDiskPath(volumeName string) (string, error)

// Check if the volume is already attached to the instance
DiskIsAttached(diskName, instanceID string) (bool, error)
// Check if the volume is already attached to the node with the specified NodeName
DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error)
}

// InstanceGroups is an interface for managing cloud-managed instance groups / autoscaling instance groups
@@ -362,7 +362,7 @@ type Cloud struct {
// attached, to avoid a race condition where we assign a device mapping
// and then get a second request before we attach the volume
attachingMutex sync.Mutex
attaching map[ /*nodeName*/ string]map[mountDevice]string
attaching map[types.NodeName]map[mountDevice]string
}

var _ Volumes = &Cloud{}

@@ -537,7 +537,7 @@ func (c *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
}

// CurrentNodeName returns the name of the current node
func (c *Cloud) CurrentNodeName(hostname string) (string, error) {
func (c *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
return c.selfAWSInstance.nodeName, nil
}

@@ -795,7 +795,7 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
cfg: cfg,
region: regionName,

attaching: make(map[string]map[mountDevice]string),
attaching: make(map[types.NodeName]map[mountDevice]string),
}

selfAWSInstance, err := awsCloud.buildSelfAWSInstance()

@@ -877,7 +877,7 @@ func (c *Cloud) Routes() (cloudprovider.Routes, bool) {
}

// NodeAddresses is an implementation of Instances.NodeAddresses.
func (c *Cloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
func (c *Cloud) NodeAddresses(name types.NodeName) ([]api.NodeAddress, error) {
if c.selfAWSInstance.nodeName == name || len(name) == 0 {
addresses := []api.NodeAddress{}

@@ -932,15 +932,15 @@ func (c *Cloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
return addresses, nil
}

// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (c *Cloud) ExternalID(name string) (string, error) {
if c.selfAWSInstance.nodeName == name {
// ExternalID returns the cloud provider ID of the node with the specified nodeName (deprecated).
func (c *Cloud) ExternalID(nodeName types.NodeName) (string, error) {
if c.selfAWSInstance.nodeName == nodeName {
// We assume that if this is run on the instance itself, the instance exists and is alive
return c.selfAWSInstance.awsID, nil
}
// We must verify that the instance still exists
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
instance, err := c.findInstanceByNodeName(name)
instance, err := c.findInstanceByNodeName(nodeName)
if err != nil {
return "", err
}
@@ -950,34 +950,34 @@ func (c *Cloud) ExternalID(name string) (string, error) {
return orEmpty(instance.InstanceId), nil
}

// InstanceID returns the cloud provider ID of the specified instance.
func (c *Cloud) InstanceID(name string) (string, error) {
// InstanceID returns the cloud provider ID of the node with the specified nodeName.
func (c *Cloud) InstanceID(nodeName types.NodeName) (string, error) {
// In the future it is possible to also return an endpoint as:
// <endpoint>/<zone>/<instanceid>
if c.selfAWSInstance.nodeName == name {
if c.selfAWSInstance.nodeName == nodeName {
return "/" + c.selfAWSInstance.availabilityZone + "/" + c.selfAWSInstance.awsID, nil
}
inst, err := c.getInstanceByNodeName(name)
inst, err := c.getInstanceByNodeName(nodeName)
if err != nil {
return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", name, err)
return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", nodeName, err)
}
return "/" + orEmpty(inst.Placement.AvailabilityZone) + "/" + orEmpty(inst.InstanceId), nil
}

// InstanceType returns the type of the specified instance.
func (c *Cloud) InstanceType(name string) (string, error) {
if c.selfAWSInstance.nodeName == name {
// InstanceType returns the type of the node with the specified nodeName.
func (c *Cloud) InstanceType(nodeName types.NodeName) (string, error) {
if c.selfAWSInstance.nodeName == nodeName {
return c.selfAWSInstance.instanceType, nil
}
inst, err := c.getInstanceByNodeName(name)
inst, err := c.getInstanceByNodeName(nodeName)
if err != nil {
return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", name, err)
return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", nodeName, err)
}
return orEmpty(inst.InstanceType), nil
}

// Return a list of instances matching regex string.
func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) {
func (c *Cloud) getInstancesByRegex(regex string) ([]types.NodeName, error) {
filters := []*ec2.Filter{newEc2Filter("instance-state-name", "running")}
filters = c.addFilters(filters)
request := &ec2.DescribeInstancesInput{
@@ -986,10 +986,10 @@ func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) {

instances, err := c.ec2.DescribeInstances(request)
if err != nil {
return []string{}, err
return []types.NodeName{}, err
}
if len(instances) == 0 {
return []string{}, fmt.Errorf("no instances returned")
return []types.NodeName{}, fmt.Errorf("no instances returned")
}

if strings.HasPrefix(regex, "'") && strings.HasSuffix(regex, "'") {

@@ -999,10 +999,10 @@ func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) {

re, err := regexp.Compile(regex)
if err != nil {
return []string{}, err
return []types.NodeName{}, err
}

matchingInstances := []string{}
matchingInstances := []types.NodeName{}
for _, instance := range instances {
// Only return fully-ready instances when listing instances
// (vs a query by name, where we will return it if we find it)

@@ -1011,16 +1011,16 @@ func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) {
continue
}

privateDNSName := orEmpty(instance.PrivateDnsName)
if privateDNSName == "" {
nodeName := mapInstanceToNodeName(instance)
if nodeName == "" {
glog.V(2).Infof("Skipping EC2 instance (no PrivateDNSName): %s",
orEmpty(instance.InstanceId))
aws.StringValue(instance.InstanceId))
continue
}

for _, tag := range instance.Tags {
if orEmpty(tag.Key) == "Name" && re.MatchString(orEmpty(tag.Value)) {
matchingInstances = append(matchingInstances, privateDNSName)
matchingInstances = append(matchingInstances, nodeName)
break
}
}

@@ -1030,7 +1030,7 @@ func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) {
}

// List is an implementation of Instances.List.
func (c *Cloud) List(filter string) ([]string, error) {
func (c *Cloud) List(filter string) ([]types.NodeName, error) {
// TODO: Should really use tag query. No need to go regexp.
return c.getInstancesByRegex(filter)
}
@@ -1102,7 +1102,7 @@ type awsInstance struct {
awsID string

// node name in k8s
nodeName string
nodeName types.NodeName

// availability zone the instance resides in
availabilityZone string

@@ -1126,7 +1126,7 @@ func newAWSInstance(ec2Service EC2, instance *ec2.Instance) *awsInstance {
self := &awsInstance{
ec2: ec2Service,
awsID: aws.StringValue(instance.InstanceId),
nodeName: aws.StringValue(instance.PrivateDnsName),
nodeName: mapInstanceToNodeName(instance),
availabilityZone: az,
instanceType: aws.StringValue(instance.InstanceType),
vpcID: aws.StringValue(instance.VpcId),

@@ -1436,8 +1436,8 @@ func (c *Cloud) buildSelfAWSInstance() (*awsInstance, error) {
return newAWSInstance(c.ec2, instance), nil
}

// Gets the awsInstance with node-name nodeName, or the 'self' instance if nodeName == ""
func (c *Cloud) getAwsInstance(nodeName string) (*awsInstance, error) {
// Gets the awsInstance for the node with the specified nodeName, or the 'self' instance if nodeName == ""
func (c *Cloud) getAwsInstance(nodeName types.NodeName) (*awsInstance, error) {
var awsInstance *awsInstance
if nodeName == "" {
awsInstance = c.selfAWSInstance
@@ -1454,15 +1454,15 @@ func (c *Cloud) getAwsInstance(nodeName string) (*awsInstance, error) {
}

// AttachDisk implements Volumes.AttachDisk
func (c *Cloud) AttachDisk(diskName string, instanceName string, readOnly bool) (string, error) {
func (c *Cloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) (string, error) {
disk, err := newAWSDisk(c, diskName)
if err != nil {
return "", err
}

awsInstance, err := c.getAwsInstance(instanceName)
awsInstance, err := c.getAwsInstance(nodeName)
if err != nil {
return "", fmt.Errorf("error finding instance %s: %v", instanceName, err)
return "", fmt.Errorf("error finding instance %s: %v", nodeName, err)
}

if readOnly {

@@ -1528,32 +1528,32 @@ func (c *Cloud) AttachDisk(diskName string, instanceName string, readOnly bool)
// which could theoretically be against a different device (or even instance).
if attachment == nil {
// Impossible?
return "", fmt.Errorf("unexpected state: attachment nil after attached %q to %q", diskName, instanceName)
return "", fmt.Errorf("unexpected state: attachment nil after attached %q to %q", diskName, nodeName)
}
if ec2Device != aws.StringValue(attachment.Device) {
return "", fmt.Errorf("disk attachment of %q to %q failed: requested device %q but found %q", diskName, instanceName, ec2Device, aws.StringValue(attachment.Device))
return "", fmt.Errorf("disk attachment of %q to %q failed: requested device %q but found %q", diskName, nodeName, ec2Device, aws.StringValue(attachment.Device))
}
if awsInstance.awsID != aws.StringValue(attachment.InstanceId) {
return "", fmt.Errorf("disk attachment of %q to %q failed: requested instance %q but found %q", diskName, instanceName, awsInstance.awsID, aws.StringValue(attachment.InstanceId))
return "", fmt.Errorf("disk attachment of %q to %q failed: requested instance %q but found %q", diskName, nodeName, awsInstance.awsID, aws.StringValue(attachment.InstanceId))
}

return hostDevice, nil
}

// DetachDisk implements Volumes.DetachDisk
func (c *Cloud) DetachDisk(diskName string, instanceName string) (string, error) {
func (c *Cloud) DetachDisk(diskName string, nodeName types.NodeName) (string, error) {
disk, err := newAWSDisk(c, diskName)
if err != nil {
return "", err
}

awsInstance, err := c.getAwsInstance(instanceName)
awsInstance, err := c.getAwsInstance(nodeName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// If instance no longer exists, safe to assume volume is not attached.
glog.Warningf(
"Instance %q does not exist. DetachDisk will assume disk %q is not attached to it.",
instanceName,
nodeName,
diskName)
return "", nil
}
@@ -1743,14 +1743,14 @@ func (c *Cloud) GetDiskPath(volumeName string) (string, error) {
}

// DiskIsAttached implements Volumes.DiskIsAttached
func (c *Cloud) DiskIsAttached(diskName, instanceID string) (bool, error) {
awsInstance, err := c.getAwsInstance(instanceID)
func (c *Cloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
awsInstance, err := c.getAwsInstance(nodeName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// If instance no longer exists, safe to assume volume is not attached.
glog.Warningf(
"Instance %q does not exist. DiskIsAttached will assume disk %q is not attached to it.",
instanceID,
nodeName,
diskName)
return false, nil
}

@@ -3184,11 +3184,23 @@ func (c *Cloud) getInstancesByNodeNamesCached(nodeNames sets.String) ([]*ec2.Ins
return instances, nil
}

// mapNodeNameToPrivateDNSName maps a k8s NodeName to an AWS Instance PrivateDNSName
// This is a simple string cast
func mapNodeNameToPrivateDNSName(nodeName types.NodeName) string {
return string(nodeName)
}

// mapInstanceToNodeName maps an EC2 instance to a k8s NodeName, by extracting the PrivateDNSName
func mapInstanceToNodeName(i *ec2.Instance) types.NodeName {
return types.NodeName(aws.StringValue(i.PrivateDnsName))
}

// Returns the instance with the specified node name
// Returns nil if it does not exist
func (c *Cloud) findInstanceByNodeName(nodeName string) (*ec2.Instance, error) {
func (c *Cloud) findInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error) {
privateDNSName := mapNodeNameToPrivateDNSName(nodeName)
filters := []*ec2.Filter{
newEc2Filter("private-dns-name", nodeName),
newEc2Filter("private-dns-name", privateDNSName),
newEc2Filter("instance-state-name", "running"),
}
filters = c.addFilters(filters)

@@ -3211,7 +3223,7 @@ func (c *Cloud) findInstanceByNodeName(nodeName string) (*ec2.Instance, error) {

// Returns the instance with the specified node name
// Like findInstanceByNodeName, but returns error if node not found
func (c *Cloud) getInstanceByNodeName(nodeName string) (*ec2.Instance, error) {
func (c *Cloud) getInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error) {
instance, err := c.findInstanceByNodeName(nodeName)
if err == nil && instance == nil {
return nil, cloudprovider.InstanceNotFound
@@ -86,9 +86,9 @@ func (c *Cloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
glog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID)
continue
}
instanceName := orEmpty(instance.PrivateDnsName)
nodeName := mapInstanceToNodeName(instance)
routeName := clusterName + "-" + destinationCIDR
routes = append(routes, &cloudprovider.Route{Name: routeName, TargetInstance: instanceName, DestinationCIDR: destinationCIDR})
routes = append(routes, &cloudprovider.Route{Name: routeName, TargetNode: nodeName, DestinationCIDR: destinationCIDR})
}

return routes, nil

@@ -110,7 +110,7 @@ func (c *Cloud) configureInstanceSourceDestCheck(instanceID string, sourceDestCh
// CreateRoute implements Routes.CreateRoute
// Create the described route
func (c *Cloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
instance, err := c.getInstanceByNodeName(route.TargetInstance)
instance, err := c.getInstanceByNodeName(route.TargetNode)
if err != nil {
return err
}
@@ -34,6 +34,7 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"k8s.io/kubernetes/pkg/types"
)

const TestClusterId = "clusterid.test"

@@ -605,11 +606,11 @@ func TestList(t *testing.T) {

table := []struct {
input string
expect []string
expect []types.NodeName
}{
{"blahonga", []string{}},
{"quux", []string{"instance3.ec2.internal"}},
{"a", []string{"instance1.ec2.internal", "instance2.ec2.internal"}},
{"blahonga", []types.NodeName{}},
{"quux", []types.NodeName{"instance3.ec2.internal"}},
{"a", []types.NodeName{"instance1.ec2.internal", "instance2.ec2.internal"}},
}

for _, item := range table {

@@ -705,7 +706,7 @@ func TestNodeAddresses(t *testing.T) {
fakeServices.selfInstance.PublicIpAddress = aws.String("2.3.4.5")
fakeServices.selfInstance.PrivateIpAddress = aws.String("192.168.0.2")

addrs4, err4 := aws4.NodeAddresses(*instance0.PrivateDnsName)
addrs4, err4 := aws4.NodeAddresses(mapInstanceToNodeName(&instance0))
if err4 != nil {
t.Errorf("unexpected error: %v", err4)
}

@@ -1062,7 +1063,7 @@ func TestIpPermissionExistsHandlesMultipleGroupIdsWithUserIds(t *testing.T) {
func TestFindInstanceByNodeNameExcludesTerminatedInstances(t *testing.T) {
awsServices := NewFakeAWSServices()

nodeName := "my-dns.internal"
nodeName := types.NodeName("my-dns.internal")

var tag ec2.Tag
tag.Key = aws.String(TagNameKubernetesCluster)

@@ -1071,13 +1072,13 @@ func TestFindInstanceByNodeNameExcludesTerminatedInstances(t *testing.T) {

var runningInstance ec2.Instance
runningInstance.InstanceId = aws.String("i-running")
runningInstance.PrivateDnsName = aws.String(nodeName)
runningInstance.PrivateDnsName = aws.String(string(nodeName))
runningInstance.State = &ec2.InstanceState{Code: aws.Int64(16), Name: aws.String("running")}
runningInstance.Tags = tags

var terminatedInstance ec2.Instance
terminatedInstance.InstanceId = aws.String("i-terminated")
terminatedInstance.PrivateDnsName = aws.String(nodeName)
terminatedInstance.PrivateDnsName = aws.String(string(nodeName))
terminatedInstance.State = &ec2.InstanceState{Code: aws.Int64(48), Name: aws.String("terminated")}
terminatedInstance.Tags = tags
@@ -24,10 +24,11 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider"

"github.com/Azure/azure-sdk-for-go/arm/compute"
"k8s.io/kubernetes/pkg/types"
)

// NodeAddresses returns the addresses of the specified instance.
func (az *Cloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
func (az *Cloud) NodeAddresses(name types.NodeName) ([]api.NodeAddress, error) {
ip, err := az.getIPForMachine(name)
if err != nil {
return nil, err

@@ -35,18 +36,18 @@ func (az *Cloud) NodeAddresses(name string) ([]api.NodeAddress, error) {

return []api.NodeAddress{
{Type: api.NodeInternalIP, Address: ip},
{Type: api.NodeHostName, Address: name},
{Type: api.NodeHostName, Address: string(name)},
}, nil
}

// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (az *Cloud) ExternalID(name string) (string, error) {
func (az *Cloud) ExternalID(name types.NodeName) (string, error) {
return az.InstanceID(name)
}

// InstanceID returns the cloud provider ID of the specified instance.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
func (az *Cloud) InstanceID(name string) (string, error) {
func (az *Cloud) InstanceID(name types.NodeName) (string, error) {
machine, exists, err := az.getVirtualMachine(name)
if err != nil {
return "", err

@@ -60,7 +61,7 @@ func (az *Cloud) InstanceID(name string) (string, error) {
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
func (az *Cloud) InstanceType(name string) (string, error) {
func (az *Cloud) InstanceType(name types.NodeName) (string, error) {
machine, exists, err := az.getVirtualMachine(name)
if err != nil {
return "", err

@@ -71,7 +72,7 @@ func (az *Cloud) InstanceType(name string) (string, error) {
}

// List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn)
func (az *Cloud) List(filter string) ([]string, error) {
func (az *Cloud) List(filter string) ([]types.NodeName, error) {
allNodes, err := az.listAllNodesInResourceGroup()
if err != nil {
return nil, err

@@ -82,9 +83,9 @@ func (az *Cloud) List(filter string) ([]string, error) {
return nil, err
}

nodeNames := make([]string, len(filteredNodes))
nodeNames := make([]types.NodeName, len(filteredNodes))
for i, v := range filteredNodes {
nodeNames[i] = *v.Name
nodeNames[i] = types.NodeName(*v.Name)
}

return nodeNames, nil
@@ -98,8 +99,8 @@ func (az *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {

// CurrentNodeName returns the name of the node we are currently running on
// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
func (az *Cloud) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
func (az *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
return types.NodeName(hostname), nil
}

func (az *Cloud) listAllNodesInResourceGroup() ([]compute.VirtualMachine, error) {

@@ -144,3 +145,15 @@ func filterNodes(nodes []compute.VirtualMachine, filter string) ([]compute.Virtu

return filteredNodes, nil
}

// mapNodeNameToVMName maps a k8s NodeName to an Azure VM Name
// This is a simple string cast.
func mapNodeNameToVMName(nodeName types.NodeName) string {
return string(nodeName)
}

// mapVMNameToNodeName maps an Azure VM Name to a k8s NodeName
// This is a simple string cast.
func mapVMNameToNodeName(vmName string) types.NodeName {
return types.NodeName(vmName)
}
@@ -27,6 +27,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
)

// GetLoadBalancer returns whether the specified load balancer exists, and

@@ -60,7 +61,7 @@ func (az *Cloud) GetLoadBalancer(clusterName string, service *api.Service) (stat
}

// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, hosts []string) (*api.LoadBalancerStatus, error) {
func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, nodeNames []string) (*api.LoadBalancerStatus, error) {
lbName := getLoadBalancerName(clusterName)
pipName := getPublicIPName(clusterName, service)
serviceName := getServiceName(service)

@@ -99,7 +100,7 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, ho
}
}

lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, pip, clusterName, service, hosts)
lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, pip, clusterName, service, nodeNames)
if err != nil {
return nil, err
}

@@ -114,11 +115,11 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, ho
// Add the machines to the backend pool if they're not already
lbBackendName := getBackendPoolName(clusterName)
lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendName)
hostUpdates := make([]func() error, len(hosts))
for i, host := range hosts {
localHost := host
hostUpdates := make([]func() error, len(nodeNames))
for i, nodeName := range nodeNames {
localNodeName := nodeName
f := func() error {
err := az.ensureHostInPool(serviceName, localHost, lbBackendPoolID)
err := az.ensureHostInPool(serviceName, types.NodeName(localNodeName), lbBackendPoolID)
if err != nil {
return fmt.Errorf("ensure(%s): lb(%s) - failed to ensure host in pool: %q", serviceName, lbName, err)
}

@@ -139,8 +140,8 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, ho
}

// UpdateLoadBalancer updates hosts under the specified load balancer.
func (az *Cloud) UpdateLoadBalancer(clusterName string, service *api.Service, hosts []string) error {
_, err := az.EnsureLoadBalancer(clusterName, service, hosts)
func (az *Cloud) UpdateLoadBalancer(clusterName string, service *api.Service, nodeNames []string) error {
_, err := az.EnsureLoadBalancer(clusterName, service, nodeNames)
return err
}

@@ -257,7 +258,7 @@ func (az *Cloud) ensurePublicIPDeleted(serviceName, pipName string) error {
// This ensures load balancer exists and the frontend ip config is setup.
// This also reconciles the Service's Ports with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.PublicIPAddress, clusterName string, service *api.Service, hosts []string) (network.LoadBalancer, bool, error) {
func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.PublicIPAddress, clusterName string, service *api.Service, nodeNames []string) (network.LoadBalancer, bool, error) {
lbName := getLoadBalancerName(clusterName)
serviceName := getServiceName(service)
lbFrontendIPConfigName := getFrontendIPConfigName(service)
@@ -556,8 +557,9 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b

// This ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool.
func (az *Cloud) ensureHostInPool(serviceName, machineName string, backendPoolID string) error {
machine, err := az.VirtualMachinesClient.Get(az.ResourceGroup, machineName, "")
func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string) error {
vmName := mapNodeNameToVMName(nodeName)
machine, err := az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "")
if err != nil {
return err
}
@@ -24,6 +24,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
)

// ListRoutes lists all managed routes that belong to the specified clusterName

@@ -41,13 +42,13 @@ func (az *Cloud) ListRoutes(clusterName string) (routes []*cloudprovider.Route,
if routeTable.Properties.Routes != nil {
kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Properties.Routes))
for i, route := range *routeTable.Properties.Routes {
instance := getInstanceName(*route.Name)
instance := mapRouteNameToNodeName(*route.Name)
cidr := *route.Properties.AddressPrefix
glog.V(10).Infof("list: * instance=%q, cidr=%q", instance, cidr)

kubeRoutes[i] = &cloudprovider.Route{
Name: *route.Name,
TargetInstance: instance,
TargetNode: instance,
DestinationCIDR: cidr,
}
}

@@ -61,7 +62,7 @@ func (az *Cloud) ListRoutes(clusterName string) (routes []*cloudprovider.Route,
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetInstance, kubeRoute.DestinationCIDR)
glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)

routeTable, existsRouteTable, err := az.getRouteTable()
if err != nil {

@@ -107,12 +108,12 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo
}
}

targetIP, err := az.getIPForMachine(kubeRoute.TargetInstance)
targetIP, err := az.getIPForMachine(kubeRoute.TargetNode)
if err != nil {
return err
}

routeName := getRouteName(kubeRoute.TargetInstance)
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
route := network.Route{
Name: to.StringPtr(routeName),
Properties: &network.RoutePropertiesFormat{

@@ -122,40 +123,40 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo
},
}

glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetInstance, kubeRoute.DestinationCIDR)
glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
_, err = az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
if err != nil {
return err
}

glog.V(2).Infof("create: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetInstance, kubeRoute.DestinationCIDR)
glog.V(2).Infof("create: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return nil
}

// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
func (az *Cloud) DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) error {
glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetInstance, kubeRoute.DestinationCIDR)
glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)

routeName := getRouteName(kubeRoute.TargetInstance)
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
_, err := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
if err != nil {
return err
}

glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetInstance, kubeRoute.DestinationCIDR)
glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return nil
}

// This must be kept in sync with getInstanceName.
// This must be kept in sync with mapRouteNameToNodeName.
// These two functions enable stashing the instance name in the route
// and then retrieving it later when listing. This is needed because
// Azure does not let you put tags/descriptions on the Route itself.
func getRouteName(instanceName string) string {
return fmt.Sprintf("%s", instanceName)
func mapNodeNameToRouteName(nodeName types.NodeName) string {
return fmt.Sprintf("%s", nodeName)
}

// Used with getRouteName. See comment on getRouteName.
func getInstanceName(routeName string) string {
return fmt.Sprintf("%s", routeName)
// Used with mapNodeNameToRouteName. See comment on mapNodeNameToRouteName.
func mapRouteNameToNodeName(routeName string) types.NodeName {
return types.NodeName(fmt.Sprintf("%s", routeName))
}
@@ -23,6 +23,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
)

const (

@@ -31,8 +32,8 @@ const (

// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (az *Cloud) AttachDisk(diskName, diskURI, vmName string, lun int32, cachingMode compute.CachingTypes) error {
vm, exists, err := az.getVirtualMachine(vmName)
func (az *Cloud) AttachDisk(diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil {
return err
} else if !exists {

@@ -58,6 +59,7 @@ func (az *Cloud) AttachDisk(diskName, diskURI, vmName string, lun int32, caching
},
},
}
vmName := mapNodeNameToVMName(nodeName)
_, err = az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
if err != nil {
glog.Errorf("azure attach failed, err: %v", err)

@@ -65,7 +67,7 @@ func (az *Cloud) AttachDisk(diskName, diskURI, vmName string, lun int32, caching
if strings.Contains(detail, "Code=\"AcquireDiskLeaseFailed\"") {
// if lease cannot be acquired, immediately detach the disk and return the original error
glog.Infof("failed to acquire disk lease, try detach")
az.DetachDiskByName(diskName, diskURI, vmName)
az.DetachDiskByName(diskName, diskURI, nodeName)
}
} else {
glog.V(4).Infof("azure attach succeeded")

@@ -75,11 +77,11 @@ func (az *Cloud) AttachDisk(diskName, diskURI, vmName string, lun int32, caching

// DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI
func (az *Cloud) DetachDiskByName(diskName, diskURI, vmName string) error {
vm, exists, err := az.getVirtualMachine(vmName)
func (az *Cloud) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil || !exists {
// if host doesn't exist, no need to detach
glog.Warningf("cannot find node %s, skip detaching disk %s", vmName, diskName)
glog.Warningf("cannot find node %s, skip detaching disk %s", nodeName, diskName)
return nil
}

@@ -100,6 +102,7 @@ func (az *Cloud) DetachDiskByName(diskName, diskURI, vmName string) error {
},
},
}
vmName := mapNodeNameToVMName(nodeName)
_, err = az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
if err != nil {
glog.Errorf("azure disk detach failed, err: %v", err)

@@ -110,8 +113,8 @@ func (az *Cloud) DetachDiskByName(diskName, diskURI, vmName string) error {
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (az *Cloud) GetDiskLun(diskName, diskURI, vmName string) (int32, error) {
vm, exists, err := az.getVirtualMachine(vmName)
func (az *Cloud) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil {
return -1, err
} else if !exists {

@@ -130,8 +133,8 @@ func (az *Cloud) GetDiskLun(diskName, diskURI, vmName string) (int32, error) {

// GetNextDiskLun searches all vhd attachment on the host and find unused lun
// return -1 if all luns are used
func (az *Cloud) GetNextDiskLun(vmName string) (int32, error) {
vm, exists, err := az.getVirtualMachine(vmName)
func (az *Cloud) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil {
return -1, err
} else if !exists {
@@ -25,6 +25,7 @@ import (

"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/network"
"k8s.io/kubernetes/pkg/types"
)

const (

@@ -202,8 +203,8 @@ outer:
return -1, fmt.Errorf("SecurityGroup priorities are exhausted")
}

func (az *Cloud) getIPForMachine(machineName string) (string, error) {
machine, exists, err := az.getVirtualMachine(machineName)
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) {
machine, exists, err := az.getVirtualMachine(nodeName)
if !exists {
return "", cloudprovider.InstanceNotFound
}
@@ -22,6 +22,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/Azure/go-autorest/autorest"
"k8s.io/kubernetes/pkg/types"
)

// checkExistsFromError inspects an error and returns a true if err is nil,

@@ -38,10 +39,11 @@ func checkResourceExistsFromError(err error) (bool, error) {
return false, v
}

func (az *Cloud) getVirtualMachine(machineName string) (vm compute.VirtualMachine, exists bool, err error) {
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, exists bool, err error) {
var realErr error

vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, machineName, "")
vmName := string(nodeName)
vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "")

exists, realErr = checkResourceExistsFromError(err)
if realErr != nil {
@@ -25,6 +25,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
)

const ProviderName = "fake"

@@ -49,9 +50,9 @@ type FakeCloud struct {
Err error
Calls []string
Addresses []api.NodeAddress
ExtID map[string]string
InstanceTypes map[string]string
Machines []string
ExtID map[types.NodeName]string
InstanceTypes map[types.NodeName]string
Machines []types.NodeName
NodeResources *api.NodeResources
ClusterList []string
MasterName string

@@ -173,13 +174,13 @@ func (f *FakeCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
}

// Implementation of Instances.CurrentNodeName
func (f *FakeCloud) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
func (f *FakeCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
return types.NodeName(hostname), nil
}

// NodeAddresses is a test-spy implementation of Instances.NodeAddresses.
// It adds an entry "node-addresses" into the internal method call record.
func (f *FakeCloud) NodeAddresses(instance string) ([]api.NodeAddress, error) {
func (f *FakeCloud) NodeAddresses(instance types.NodeName) ([]api.NodeAddress, error) {
f.addCall("node-addresses")
return f.Addresses, f.Err
}

@@ -187,30 +188,30 @@ func (f *FakeCloud) NodeAddresses(instance string) ([]api.NodeAddress, error) {
// ExternalID is a test-spy implementation of Instances.ExternalID.
// It adds an entry "external-id" into the internal method call record.
// It returns an external id to the mapped instance name, if not found, it will return "ext-{instance}"
func (f *FakeCloud) ExternalID(instance string) (string, error) {
func (f *FakeCloud) ExternalID(nodeName types.NodeName) (string, error) {
f.addCall("external-id")
return f.ExtID[instance], f.Err
return f.ExtID[nodeName], f.Err
}

// InstanceID returns the cloud provider ID of the specified instance.
func (f *FakeCloud) InstanceID(instance string) (string, error) {
// InstanceID returns the cloud provider ID of the node with the specified Name.
func (f *FakeCloud) InstanceID(nodeName types.NodeName) (string, error) {
f.addCall("instance-id")
return f.ExtID[instance], nil
return f.ExtID[nodeName], nil
}

// InstanceType returns the type of the specified instance.
func (f *FakeCloud) InstanceType(instance string) (string, error) {
func (f *FakeCloud) InstanceType(instance types.NodeName) (string, error) {
f.addCall("instance-type")
return f.InstanceTypes[instance], nil
}

// List is a test-spy implementation of Instances.List.
// It adds an entry "list" into the internal method call record.
func (f *FakeCloud) List(filter string) ([]string, error) {
func (f *FakeCloud) List(filter string) ([]types.NodeName, error) {
f.addCall("list")
result := []string{}
result := []types.NodeName{}
for _, machine := range f.Machines {
if match, _ := regexp.MatchString(filter, machine); match {
if match, _ := regexp.MatchString(filter, string(machine)); match {
result = append(result, machine)
}
}
|
@ -122,16 +122,16 @@ const (
|
||||
|
||||
// Disks is interface for manipulation with GCE PDs.
|
||||
type Disks interface {
|
||||
// AttachDisk attaches given disk to given instance. Current instance
|
||||
// is used when instanceID is empty string.
|
||||
AttachDisk(diskName, instanceID string, readOnly bool) error
|
||||
// AttachDisk attaches given disk to the node with the specified NodeName.
|
||||
// Current instance is used when instanceID is empty string.
|
||||
AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error
|
||||
|
||||
// DetachDisk detaches given disk to given instance. Current instance
|
||||
// is used when instanceID is empty string.
|
||||
DetachDisk(devicePath, instanceID string) error
|
||||
// DetachDisk detaches given disk to the node with the specified NodeName.
|
||||
// Current instance is used when nodeName is empty string.
|
||||
DetachDisk(devicePath string, nodeName types.NodeName) error
|
||||
|
||||
// DiskIsAttached checks if a disk is attached to the given node.
|
||||
DiskIsAttached(diskName, instanceID string) (bool, error)
|
||||
// DiskIsAttached checks if a disk is attached to the node with the specified NodeName.
|
||||
DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error)
|
||||
|
||||
// CreateDisk creates a new PD with given properties. Tags are serialized
|
||||
// as JSON into Description field.
|
||||
@ -2095,8 +2095,8 @@ func canonicalizeInstanceName(name string) string {
|
||||
}
|
||||
|
||||
// Implementation of Instances.CurrentNodeName
|
||||
func (gce *GCECloud) CurrentNodeName(hostname string) (string, error) {
|
||||
return hostname, nil
|
||||
func (gce *GCECloud) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
return types.NodeName(hostname), nil
|
||||
}
|
||||
|
||||
func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
@ -2145,7 +2145,7 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error
|
||||
}
|
||||
|
||||
// NodeAddresses is an implementation of Instances.NodeAddresses.
|
||||
func (gce *GCECloud) NodeAddresses(_ string) ([]api.NodeAddress, error) {
|
||||
func (gce *GCECloud) NodeAddresses(_ types.NodeName) ([]api.NodeAddress, error) {
|
||||
internalIP, err := metadata.Get("instance/network-interfaces/0/ip")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't get internal IP: %v", err)
|
||||
@ -2172,11 +2172,23 @@ func (gce *GCECloud) isCurrentInstance(instanceID string) bool {
|
||||
return currentInstanceID == canonicalizeInstanceName(instanceID)
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (gce *GCECloud) ExternalID(instance string) (string, error) {
|
||||
// mapNodeNameToInstanceName maps a k8s NodeName to a GCE Instance Name
|
||||
// This is a simple string cast.
|
||||
func mapNodeNameToInstanceName(nodeName types.NodeName) string {
|
||||
return string(nodeName)
|
||||
}
|
||||
|
||||
// mapInstanceToNodeName maps a GCE Instance to a k8s NodeName
|
||||
func mapInstanceToNodeName(instance *compute.Instance) types.NodeName {
|
||||
return types.NodeName(instance.Name)
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the node with the specified NodeName (deprecated).
|
||||
func (gce *GCECloud) ExternalID(nodeName types.NodeName) (string, error) {
|
||||
instanceName := mapNodeNameToInstanceName(nodeName)
|
||||
if gce.useMetadataServer {
|
||||
// Use metadata, if possible, to fetch ID. See issue #12000
|
||||
if gce.isCurrentInstance(instance) {
|
||||
if gce.isCurrentInstance(instanceName) {
|
||||
externalInstanceID, err := getCurrentExternalIDViaMetadata()
|
||||
if err == nil {
|
||||
return externalInstanceID, nil
|
||||
@ -2185,15 +2197,16 @@ func (gce *GCECloud) ExternalID(instance string) (string, error) {
|
||||
}
|
||||
|
||||
// Fallback to GCE API call if metadata server fails to retrieve ID
|
||||
inst, err := gce.getInstanceByName(instance)
|
||||
inst, err := gce.getInstanceByName(instanceName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strconv.FormatUint(inst.ID, 10), nil
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
func (gce *GCECloud) InstanceID(instanceName string) (string, error) {
|
||||
// InstanceID returns the cloud provider ID of the node with the specified NodeName.
|
||||
func (gce *GCECloud) InstanceID(nodeName types.NodeName) (string, error) {
|
||||
instanceName := mapNodeNameToInstanceName(nodeName)
|
||||
if gce.useMetadataServer {
|
||||
// Use metadata, if possible, to fetch ID. See issue #12000
|
||||
if gce.isCurrentInstance(instanceName) {
|
||||
@ -2210,8 +2223,9 @@ func (gce *GCECloud) InstanceID(instanceName string) (string, error) {
|
||||
return gce.projectID + "/" + instance.Zone + "/" + instance.Name, nil
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
|
||||
func (gce *GCECloud) InstanceType(instanceName string) (string, error) {
|
||||
// InstanceType returns the type of the specified node with the specified NodeName.
|
||||
func (gce *GCECloud) InstanceType(nodeName types.NodeName) (string, error) {
|
||||
instanceName := mapNodeNameToInstanceName(nodeName)
|
||||
if gce.useMetadataServer {
|
||||
// Use metadata, if possible, to fetch ID. See issue #12000
|
||||
if gce.isCurrentInstance(instanceName) {
|
||||
@ -2229,8 +2243,8 @@ func (gce *GCECloud) InstanceType(instanceName string) (string, error) {
|
||||
}
|
||||
|
||||
// List is an implementation of Instances.List.
|
||||
func (gce *GCECloud) List(filter string) ([]string, error) {
|
||||
var instances []string
|
||||
func (gce *GCECloud) List(filter string) ([]types.NodeName, error) {
|
||||
var instances []types.NodeName
|
||||
// TODO: Parallelize, although O(zones) so not too bad (N <= 3 typically)
|
||||
for _, zone := range gce.managedZones {
|
||||
pageToken := ""
|
||||
@ -2249,7 +2263,7 @@ func (gce *GCECloud) List(filter string) ([]string, error) {
|
||||
}
|
||||
pageToken = res.NextPageToken
|
||||
for _, instance := range res.Items {
|
||||
instances = append(instances, instance.Name)
|
||||
instances = append(instances, mapInstanceToNodeName(instance))
|
||||
}
|
||||
}
|
||||
if page >= maxPages {
|
||||
@ -2349,7 +2363,9 @@ func (gce *GCECloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, err
|
||||
}
|
||||
|
||||
target := path.Base(r.NextHopInstance)
|
||||
routes = append(routes, &cloudprovider.Route{Name: r.Name, TargetInstance: target, DestinationCIDR: r.DestRange})
|
||||
// TODO: Should we lastComponent(target) this?
|
||||
targetNodeName := types.NodeName(target) // NodeName == Instance Name on GCE
|
||||
routes = append(routes, &cloudprovider.Route{Name: r.Name, TargetNode: targetNodeName, DestinationCIDR: r.DestRange})
|
||||
}
|
||||
}
|
||||
if page >= maxPages {
|
||||
@ -2365,7 +2381,8 @@ func gceNetworkURL(project, network string) string {
|
||||
func (gce *GCECloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
|
||||
routeName := truncateClusterName(clusterName) + "-" + nameHint
|
||||
|
||||
targetInstance, err := gce.getInstanceByName(route.TargetInstance)
|
||||
instanceName := mapNodeNameToInstanceName(route.TargetNode)
|
||||
targetInstance, err := gce.getInstanceByName(instanceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -2545,10 +2562,11 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) error {
|
||||
instance, err := gce.getInstanceByName(instanceID)
|
||||
func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
|
||||
instanceName := mapNodeNameToInstanceName(nodeName)
|
||||
instance, err := gce.getInstanceByName(instanceName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting instance %q", instanceID)
|
||||
return fmt.Errorf("error getting instance %q", instanceName)
|
||||
}
|
||||
disk, err := gce.getDiskByName(diskName, instance.Zone)
|
||||
if err != nil {
|
||||
@ -2560,7 +2578,7 @@ func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) erro
|
||||
}
|
||||
attachedDisk := gce.convertDiskToAttachedDisk(disk, readWrite)
|
||||
|
||||
attachOp, err := gce.service.Instances.AttachDisk(gce.projectID, disk.Zone, instanceID, attachedDisk).Do()
|
||||
attachOp, err := gce.service.Instances.AttachDisk(gce.projectID, disk.Zone, instanceName, attachedDisk).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -2568,19 +2586,20 @@ func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) erro
|
||||
return gce.waitForZoneOp(attachOp, disk.Zone)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) DetachDisk(devicePath, instanceID string) error {
|
||||
inst, err := gce.getInstanceByName(instanceID)
|
||||
func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) error {
|
||||
instanceName := mapNodeNameToInstanceName(nodeName)
|
||||
inst, err := gce.getInstanceByName(instanceName)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
// If instance no longer exists, safe to assume volume is not attached.
|
||||
glog.Warningf(
|
||||
"Instance %q does not exist. DetachDisk will assume PD %q is not attached to it.",
|
||||
instanceID,
|
||||
instanceName,
|
||||
devicePath)
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("error getting instance %q", instanceID)
|
||||
return fmt.Errorf("error getting instance %q", instanceName)
|
||||
}
|
||||
|
||||
detachOp, err := gce.service.Instances.DetachDisk(gce.projectID, inst.Zone, inst.Name, devicePath).Do()
|
||||
@ -2591,14 +2610,15 @@ func (gce *GCECloud) DetachDisk(devicePath, instanceID string) error {
|
||||
return gce.waitForZoneOp(detachOp, inst.Zone)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) DiskIsAttached(diskName, instanceID string) (bool, error) {
|
||||
instance, err := gce.getInstanceByName(instanceID)
|
||||
func (gce *GCECloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
|
||||
instanceName := mapNodeNameToInstanceName(nodeName)
|
||||
instance, err := gce.getInstanceByName(instanceName)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
// If instance no longer exists, safe to assume volume is not attached.
|
||||
glog.Warningf(
|
||||
"Instance %q does not exist. DiskIsAttached will assume PD %q is not attached to it.",
|
||||
instanceID,
|
||||
instanceName,
|
||||
diskName)
|
||||
return false, nil
|
||||
}
|
||||
|
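The GCE provider routes every conversion through mapNodeNameToInstanceName / mapInstanceToNodeName instead of scattering inline casts, which keeps the "node name equals instance name" assumption in one place for clouds where that assumption may later change. A hedged sketch of that round-trip pattern, with a local Instance struct standing in for *compute.Instance:

package main

import "fmt"

type NodeName string // stand-in for types.NodeName

// Instance stands in for compute.Instance; only the Name field matters here.
type Instance struct {
	Name string
}

// mapNodeNameToInstanceName mirrors the helper added in the GCE provider:
// on GCE the instance name and the node name are the same string today.
func mapNodeNameToInstanceName(nodeName NodeName) string {
	return string(nodeName)
}

// mapInstanceToNodeName is the inverse mapping.
func mapInstanceToNodeName(instance *Instance) NodeName {
	return NodeName(instance.Name)
}

func main() {
	inst := &Instance{Name: "gke-cluster-node-1"}
	nodeName := mapInstanceToNodeName(inst)
	fmt.Println(mapNodeNameToInstanceName(nodeName) == inst.Name) // true
}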
@ -30,6 +30,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
)

const (
@ -89,8 +90,8 @@ func newMesosCloud(configReader io.Reader) (*MesosCloud, error) {
}

// Implementation of Instances.CurrentNodeName
func (c *MesosCloud) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
func (c *MesosCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
return types.NodeName(hostname), nil
}

func (c *MesosCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
@ -190,8 +191,15 @@ func ipAddress(name string) (net.IP, error) {
return ipaddr, nil
}

// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (c *MesosCloud) ExternalID(instance string) (string, error) {
// mapNodeNameToHostname maps a k8s NodeName to a mesos hostname.
// This is a simple string cast
func mapNodeNameToHostname(nodeName types.NodeName) string {
return string(nodeName)
}

// ExternalID returns the cloud provider ID of the instance with the specified nodeName (deprecated).
func (c *MesosCloud) ExternalID(nodeName types.NodeName) (string, error) {
hostname := mapNodeNameToHostname(nodeName)
//TODO(jdef) use a timeout here? 15s?
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
@ -201,7 +209,7 @@ func (c *MesosCloud) ExternalID(instance string) (string, error) {
return "", err
}

node := nodes[instance]
node := nodes[hostname]
if node == nil {
return "", cloudprovider.InstanceNotFound
}
@ -213,13 +221,13 @@ func (c *MesosCloud) ExternalID(instance string) (string, error) {
return ip.String(), nil
}

// InstanceID returns the cloud provider ID of the specified instance.
func (c *MesosCloud) InstanceID(name string) (string, error) {
// InstanceID returns the cloud provider ID of the instance with the specified nodeName.
func (c *MesosCloud) InstanceID(nodeName types.NodeName) (string, error) {
return "", nil
}

// InstanceType returns the type of the specified instance.
func (c *MesosCloud) InstanceType(name string) (string, error) {
// InstanceType returns the type of the instance with the specified nodeName.
func (c *MesosCloud) InstanceType(nodeName types.NodeName) (string, error) {
return "", nil
}

@ -241,7 +249,7 @@ func (c *MesosCloud) listNodes() (map[string]*slaveNode, error) {

// List lists instances that match 'filter' which is a regular expression
// which must match the entire instance name (fqdn).
func (c *MesosCloud) List(filter string) ([]string, error) {
func (c *MesosCloud) List(filter string) ([]types.NodeName, error) {
nodes, err := c.listNodes()
if err != nil {
return nil, err
@ -250,13 +258,13 @@ func (c *MesosCloud) List(filter string) ([]string, error) {
if err != nil {
return nil, err
}
addr := []string{}
names := []types.NodeName{}
for _, node := range nodes {
if filterRegex.MatchString(node.hostname) {
addr = append(addr, node.hostname)
names = append(names, types.NodeName(node.hostname))
}
}
return addr, nil
return names, nil
}

// ListWithKubelet list those instance which have no running kubelet, i.e. the
@ -275,8 +283,9 @@ func (c *MesosCloud) ListWithoutKubelet() ([]string, error) {
return addr, nil
}

// NodeAddresses returns the addresses of the specified instance.
func (c *MesosCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
// NodeAddresses returns the addresses of the instance with the specified nodeName.
func (c *MesosCloud) NodeAddresses(nodeName types.NodeName) ([]api.NodeAddress, error) {
name := mapNodeNameToHostname(nodeName)
ip, err := ipAddress(name)
if err != nil {
return nil, err
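Several providers' List implementations now share the same shape: filter by hostname as a plain string, then convert to the typed name only when building the result slice. A small self-contained sketch of that shape (local NodeName stand-in, hypothetical hostnames):

package main

import (
	"fmt"
	"regexp"
)

type NodeName string // stand-in for types.NodeName

// listNodes filters hostnames with a regular expression and returns
// strongly-typed node names, mirroring the mesos and fake List implementations.
func listNodes(hostnames []string, filter string) ([]NodeName, error) {
	re, err := regexp.Compile(filter)
	if err != nil {
		return nil, err
	}
	names := []NodeName{}
	for _, h := range hostnames {
		if re.MatchString(h) {
			names = append(names, NodeName(h))
		}
	}
	return names, nil
}

func main() {
	names, err := listNodes([]string{"mesos1.example.com", "other.example.com"}, `^mesos\d+\.`)
	fmt.Println(names, err)
}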
@ -25,6 +25,7 @@ import (

log "github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
)

func TestIPAddress(t *testing.T) {
@ -268,7 +269,7 @@ func Test_ExternalID(t *testing.T) {
t.Fatalf("ExternalID did not return InstanceNotFound on an unknown instance")
}

slaveName := "mesos3.internal.company.com"
slaveName := types.NodeName("mesos3.internal.company.com")
id, err := mesosCloud.ExternalID(slaveName)
if id != "" {
t.Fatalf("ExternalID should not be able to resolve %q", slaveName)
@ -37,6 +37,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
)

const ProviderName = "openstack"
@ -237,9 +238,20 @@ func newOpenStack(cfg Config) (*OpenStack, error) {
return &os, nil
}

func getServerByName(client *gophercloud.ServiceClient, name string) (*servers.Server, error) {
// mapNodeNameToServerName maps a k8s NodeName to an OpenStack Server Name
// This is a simple string cast.
func mapNodeNameToServerName(nodeName types.NodeName) string {
return string(nodeName)
}

// mapServerToNodeName maps an OpenStack Server to a k8s NodeName
func mapServerToNodeName(server *servers.Server) types.NodeName {
return types.NodeName(server.Name)
}

func getServerByName(client *gophercloud.ServiceClient, name types.NodeName) (*servers.Server, error) {
opts := servers.ListOpts{
Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(name)),
Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))),
Status: "ACTIVE",
}
pager := servers.List(client, opts)
@ -270,7 +282,7 @@ func getServerByName(client *gophercloud.ServiceClient, name string) (*servers.S
return &serverList[0], nil
}

func getAddressesByName(client *gophercloud.ServiceClient, name string) ([]api.NodeAddress, error) {
func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName) ([]api.NodeAddress, error) {
srv, err := getServerByName(client, name)
if err != nil {
return nil, err
@ -339,7 +351,7 @@ func getAddressesByName(client *gophercloud.ServiceClient, name string) ([]api.N
return addrs, nil
}

func getAddressByName(client *gophercloud.ServiceClient, name string) (string, error) {
func getAddressByName(client *gophercloud.ServiceClient, name types.NodeName) (string, error) {
addrs, err := getAddressesByName(client, name)
if err != nil {
return "", err
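getServerByName now builds an exact-match, regex-escaped server filter from the typed name rather than from a raw string. A sketch of just that piece, with the gophercloud ListOpts replaced by a plain string for self-containment:

package main

import (
	"fmt"
	"regexp"
)

type NodeName string // stand-in for types.NodeName

func mapNodeNameToServerName(nodeName NodeName) string {
	return string(nodeName)
}

// exactNameFilter anchors and escapes the server name so that a node named
// "db-1" does not also match "db-10", mirroring the ListOpts.Name value
// built in getServerByName.
func exactNameFilter(nodeName NodeName) string {
	return fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(nodeName)))
}

func main() {
	fmt.Println(exactNameFilter(NodeName("db-1"))) // ^db-1$
}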
@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
)

type Instances struct {
@ -81,7 +82,7 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
return &Instances{compute, flavor_to_resource}, true
}

func (i *Instances) List(name_filter string) ([]string, error) {
func (i *Instances) List(name_filter string) ([]types.NodeName, error) {
glog.V(4).Infof("openstack List(%v) called", name_filter)

opts := servers.ListOpts{
@ -90,14 +91,14 @@ func (i *Instances) List(name_filter string) ([]string, error) {
}
pager := servers.List(i.compute, opts)

ret := make([]string, 0)
ret := make([]types.NodeName, 0)
err := pager.EachPage(func(page pagination.Page) (bool, error) {
sList, err := servers.ExtractServers(page)
if err != nil {
return false, err
}
for _, server := range sList {
ret = append(ret, server.Name)
for i := range sList {
ret = append(ret, mapServerToNodeName(&sList[i]))
}
return true, nil
})
@ -112,15 +113,15 @@ func (i *Instances) List(name_filter string) ([]string, error) {
}

// Implementation of Instances.CurrentNodeName
func (i *Instances) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) {
return types.NodeName(hostname), nil
}

func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("unimplemented")
}

func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) {
func (i *Instances) NodeAddresses(name types.NodeName) ([]api.NodeAddress, error) {
glog.V(4).Infof("NodeAddresses(%v) called", name)

addrs, err := getAddressesByName(i.compute, name)
@ -133,7 +134,7 @@ func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) {
}

// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (i *Instances) ExternalID(name string) (string, error) {
func (i *Instances) ExternalID(name types.NodeName) (string, error) {
srv, err := getServerByName(i.compute, name)
if err != nil {
if err == ErrNotFound {
@ -150,7 +151,7 @@ func (os *OpenStack) InstanceID() (string, error) {
}

// InstanceID returns the cloud provider ID of the specified instance.
func (i *Instances) InstanceID(name string) (string, error) {
func (i *Instances) InstanceID(name types.NodeName) (string, error) {
srv, err := getServerByName(i.compute, name)
if err != nil {
return "", err
@ -161,6 +162,6 @@ func (i *Instances) InstanceID(name string) (string, error) {
}

// InstanceType returns the type of the specified instance.
func (i *Instances) InstanceType(name string) (string, error) {
func (i *Instances) InstanceType(name types.NodeName) (string, error) {
return "", nil
}
@ -39,6 +39,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/service"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
)

// Note: when creating a new Loadbalancer (VM), it can take some time before it is ready for use,
@ -303,8 +304,8 @@ func (lbaas *LbaasV2) GetLoadBalancer(clusterName string, service *api.Service)
// a list of regions (from config) and query/create loadbalancers in
// each region.

func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Service, hosts []string) (*api.LoadBalancerStatus, error) {
glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, hosts, apiService.Annotations)
func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Service, nodeNames []string) (*api.LoadBalancerStatus, error) {
glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodeNames, apiService.Annotations)

ports := apiService.Spec.Ports
if len(ports) == 0 {
@ -410,8 +411,8 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Ser

waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)

for _, host := range hosts {
addr, err := getAddressByName(lbaas.compute, host)
for _, nodeName := range nodeNames {
addr, err := getAddressByName(lbaas.compute, types.NodeName(nodeName))
if err != nil {
// cleanup what was created so far
_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
@ -478,9 +479,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Ser
return status, nil
}

func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *api.Service, hosts []string) error {
func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *api.Service, nodeNames []string) error {
loadBalancerName := cloudprovider.GetLoadBalancerName(service)
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, hosts)
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodeNames)

ports := service.Spec.Ports
if len(ports) == 0 {
@ -536,8 +537,8 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *api.Servic

// Compose Set of member (addresses) that _should_ exist
addrs := map[string]empty{}
for _, host := range hosts {
addr, err := getAddressByName(lbaas.compute, host)
for _, nodeName := range nodeNames {
addr, err := getAddressByName(lbaas.compute, types.NodeName(nodeName))
if err != nil {
return err
}
@ -765,8 +766,8 @@ func (lb *LbaasV1) GetLoadBalancer(clusterName string, service *api.Service) (*a
// a list of regions (from config) and query/create loadbalancers in
// each region.

func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *api.Service, hosts []string) (*api.LoadBalancerStatus, error) {
glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, hosts, apiService.Annotations)
func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *api.Service, nodeNames []string) (*api.LoadBalancerStatus, error) {
glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodeNames, apiService.Annotations)

ports := apiService.Spec.Ports
if len(ports) > 1 {
@ -831,8 +832,8 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *api.Servic
return nil, err
}

for _, host := range hosts {
addr, err := getAddressByName(lb.compute, host)
for _, nodeName := range nodeNames {
addr, err := getAddressByName(lb.compute, types.NodeName(nodeName))
if err != nil {
return nil, err
}
@ -914,9 +915,9 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *api.Servic

}

func (lb *LbaasV1) UpdateLoadBalancer(clusterName string, service *api.Service, hosts []string) error {
func (lb *LbaasV1) UpdateLoadBalancer(clusterName string, service *api.Service, nodeNames []string) error {
loadBalancerName := cloudprovider.GetLoadBalancerName(service)
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, hosts)
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodeNames)

vip, err := getVipByName(lb.network, loadBalancerName)
if err != nil {
@ -925,8 +926,8 @@ func (lb *LbaasV1) UpdateLoadBalancer(clusterName string, service *api.Service,

// Set of member (addresses) that _should_ exist
addrs := map[string]bool{}
for _, host := range hosts {
addr, err := getAddressByName(lb.compute, host)
for _, nodeName := range nodeNames {
addr, err := getAddressByName(lb.compute, types.NodeName(nodeName))
if err != nil {
return err
}
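Note that the load balancer entry points still receive node names as a []string and convert element-by-element with types.NodeName(nodeName); pushing the typed slice all the way up the LoadBalancer interface is left for a later step. A sketch of the conversion at that boundary, with getAddressByName replaced by a hypothetical lookup function:

package main

import "fmt"

type NodeName string // stand-in for types.NodeName

// lookupAddress is a hypothetical stand-in for getAddressByName.
func lookupAddress(name NodeName) (string, error) {
	return "10.0.0.1", nil
}

// memberAddresses converts untyped node names at the boundary, the way
// EnsureLoadBalancer and UpdateLoadBalancer do in the diff above.
func memberAddresses(nodeNames []string) (map[string]bool, error) {
	addrs := map[string]bool{}
	for _, n := range nodeNames {
		addr, err := lookupAddress(NodeName(n))
		if err != nil {
			return nil, err
		}
		addrs[addr] = true
	}
	return addrs, nil
}

func main() {
	addrs, _ := memberAddresses([]string{"node-a", "node-b"})
	fmt.Println(addrs)
}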
@ -33,6 +33,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
)

const ProviderName = "ovirt"
@ -149,8 +150,9 @@ func (v *OVirtCloud) Routes() (cloudprovider.Routes, bool) {
return nil, false
}

// NodeAddresses returns the NodeAddresses of a particular machine instance
func (v *OVirtCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
// NodeAddresses returns the NodeAddresses of the instance with the specified nodeName.
func (v *OVirtCloud) NodeAddresses(nodeName types.NodeName) ([]api.NodeAddress, error) {
name := mapNodeNameToInstanceName(nodeName)
instance, err := v.fetchInstance(name)
if err != nil {
return nil, err
@ -174,8 +176,15 @@ func (v *OVirtCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
return []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: address.String()}}, nil
}

// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (v *OVirtCloud) ExternalID(name string) (string, error) {
// mapNodeNameToInstanceName maps from a k8s NodeName to an ovirt instance name (the hostname)
// This is a simple string cast
func mapNodeNameToInstanceName(nodeName types.NodeName) string {
return string(nodeName)
}

// ExternalID returns the cloud provider ID of the specified node with the specified NodeName (deprecated).
func (v *OVirtCloud) ExternalID(nodeName types.NodeName) (string, error) {
name := mapNodeNameToInstanceName(nodeName)
instance, err := v.fetchInstance(name)
if err != nil {
return "", err
@ -183,8 +192,9 @@ func (v *OVirtCloud) ExternalID(name string) (string, error) {
return instance.UUID, nil
}

// InstanceID returns the cloud provider ID of the specified instance.
func (v *OVirtCloud) InstanceID(name string) (string, error) {
// InstanceID returns the cloud provider ID of the node with the specified NodeName.
func (v *OVirtCloud) InstanceID(nodeName types.NodeName) (string, error) {
name := mapNodeNameToInstanceName(nodeName)
instance, err := v.fetchInstance(name)
if err != nil {
return "", err
@ -195,7 +205,7 @@ func (v *OVirtCloud) InstanceID(name string) (string, error) {
}

// InstanceType returns the type of the specified instance.
func (v *OVirtCloud) InstanceType(name string) (string, error) {
func (v *OVirtCloud) InstanceType(name types.NodeName) (string, error) {
return "", nil
}

@ -274,17 +284,21 @@ func (m *OVirtInstanceMap) ListSortedNames() []string {
}

// List enumerates the set of minions instances known by the cloud provider
func (v *OVirtCloud) List(filter string) ([]string, error) {
func (v *OVirtCloud) List(filter string) ([]types.NodeName, error) {
instances, err := v.fetchAllInstances()
if err != nil {
return nil, err
}
return instances.ListSortedNames(), nil
var nodeNames []types.NodeName
for _, s := range instances.ListSortedNames() {
nodeNames = append(nodeNames, types.NodeName(s))
}
return nodeNames, nil
}

// Implementation of Instances.CurrentNodeName
func (v *OVirtCloud) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
func (v *OVirtCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
return types.NodeName(hostname), nil
}

func (v *OVirtCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
@ -42,6 +42,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
)

const ProviderName = "rackspace"
@ -230,7 +231,7 @@ func (os *Rackspace) Instances() (cloudprovider.Instances, bool) {
return &Instances{compute}, true
}

func (i *Instances) List(name_filter string) ([]string, error) {
func (i *Instances) List(name_filter string) ([]types.NodeName, error) {
glog.V(2).Infof("rackspace List(%v) called", name_filter)

opts := osservers.ListOpts{
@ -239,14 +240,14 @@ func (i *Instances) List(name_filter string) ([]string, error) {
}
pager := servers.List(i.compute, opts)

ret := make([]string, 0)
ret := make([]types.NodeName, 0)
err := pager.EachPage(func(page pagination.Page) (bool, error) {
sList, err := servers.ExtractServers(page)
if err != nil {
return false, err
}
for _, server := range sList {
ret = append(ret, server.Name)
for i := range sList {
ret = append(ret, mapServerToNodeName(&sList[i]))
}
return true, nil
})
@ -396,23 +397,35 @@ func getAddressByName(api *gophercloud.ServiceClient, name string) (string, erro
return getAddressByServer(srv)
}

func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) {
glog.V(2).Infof("NodeAddresses(%v) called", name)

ip, err := probeNodeAddress(i.compute, name)
func (i *Instances) NodeAddresses(nodeName types.NodeName) ([]api.NodeAddress, error) {
glog.V(2).Infof("NodeAddresses(%v) called", nodeName)
serverName := mapNodeNameToServerName(nodeName)
ip, err := probeNodeAddress(i.compute, serverName)
if err != nil {
return nil, err
}

glog.V(2).Infof("NodeAddresses(%v) => %v", name, ip)
glog.V(2).Infof("NodeAddresses(%v) => %v", serverName, ip)

// net.ParseIP().String() is to maintain compatibility with the old code
return []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: net.ParseIP(ip).String()}}, nil
}

// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (i *Instances) ExternalID(name string) (string, error) {
return probeInstanceID(i.compute, name)
// mapNodeNameToServerName maps from a k8s NodeName to a rackspace Server Name
// This is a simple string cast.
func mapNodeNameToServerName(nodeName types.NodeName) string {
return string(nodeName)
}

// mapServerToNodeName maps a rackspace Server to a k8s NodeName
func mapServerToNodeName(s *osservers.Server) types.NodeName {
return types.NodeName(s.Name)
}

// ExternalID returns the cloud provider ID of the node with the specified Name (deprecated).
func (i *Instances) ExternalID(nodeName types.NodeName) (string, error) {
serverName := mapNodeNameToServerName(nodeName)
return probeInstanceID(i.compute, serverName)
}

// InstanceID returns the cloud provider ID of the kubelet's instance.
@ -420,13 +433,14 @@ func (rs *Rackspace) InstanceID() (string, error) {
return readInstanceID()
}

// InstanceID returns the cloud provider ID of the specified instance.
func (i *Instances) InstanceID(name string) (string, error) {
return probeInstanceID(i.compute, name)
// InstanceID returns the cloud provider ID of the node with the specified Name.
func (i *Instances) InstanceID(nodeName types.NodeName) (string, error) {
serverName := mapNodeNameToServerName(nodeName)
return probeInstanceID(i.compute, serverName)
}

// InstanceType returns the type of the specified instance.
func (i *Instances) InstanceType(name string) (string, error) {
func (i *Instances) InstanceType(name types.NodeName) (string, error) {
return "", nil
}

@ -435,10 +449,10 @@ func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
}

// Implementation of Instances.CurrentNodeName
func (i *Instances) CurrentNodeName(hostname string) (string, error) {
func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) {
// Beware when changing this, nodename == hostname assumption is crucial to
// apiserver => kubelet communication.
return hostname, nil
return types.NodeName(hostname), nil
}

func (os *Rackspace) Clusters() (cloudprovider.Clusters, bool) {
@ -40,6 +40,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/runtime"
)

@ -128,16 +129,16 @@ type VSphereConfig struct {
type Volumes interface {
// AttachDisk attaches given disk to given node. Current node
// is used when nodeName is empty string.
AttachDisk(vmDiskPath string, nodeName string) (diskID string, diskUUID string, err error)
AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error)

// DetachDisk detaches given disk to given node. Current node
// is used when nodeName is empty string.
// Assumption: If node doesn't exist, disk is already detached from node.
DetachDisk(volPath string, nodeName string) error
DetachDisk(volPath string, nodeName k8stypes.NodeName) error

// DiskIsAttached checks if a disk is attached to the given node.
// Assumption: If node doesn't exist, disk is not attached to the node.
DiskIsAttached(volPath, nodeName string) (bool, error)
DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error)

// CreateVolume creates a new vmdk with specified parameters.
CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error)
@ -319,7 +320,9 @@ func vsphereLogin(cfg *VSphereConfig, ctx context.Context) (*govmomi.Client, err
}

// Returns vSphere object `virtual machine` by its name.
func getVirtualMachineByName(cfg *VSphereConfig, ctx context.Context, c *govmomi.Client, name string) (*object.VirtualMachine, error) {
func getVirtualMachineByName(cfg *VSphereConfig, ctx context.Context, c *govmomi.Client, nodeName k8stypes.NodeName) (*object.VirtualMachine, error) {
name := nodeNameToVMName(nodeName)

// Create a new finder
f := find.NewFinder(c.Client, true)

@ -406,7 +409,7 @@ func (vs *VSphere) Instances() (cloudprovider.Instances, bool) {
}

// List returns names of VMs (inside vm folder) by applying filter and which are currently running.
func (i *Instances) List(filter string) ([]string, error) {
func (i *Instances) List(filter string) ([]k8stypes.NodeName, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c, err := vsphereLogin(i.cfg, ctx)
@ -423,11 +426,15 @@ func (i *Instances) List(filter string) ([]string, error) {
glog.V(3).Infof("Found %d instances matching %s: %s",
len(vmList), filter, vmList)

return vmList, nil
var nodeNames []k8stypes.NodeName
for _, n := range vmList {
nodeNames = append(nodeNames, k8stypes.NodeName(n))
}
return nodeNames, nil
}

// NodeAddresses is an implementation of Instances.NodeAddresses.
func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) {
func (i *Instances) NodeAddresses(nodeName k8stypes.NodeName) ([]api.NodeAddress, error) {
addrs := []api.NodeAddress{}

// Create context
@ -441,7 +448,7 @@ func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) {
}
defer c.Logout(ctx)

vm, err := getVirtualMachineByName(i.cfg, ctx, c, name)
vm, err := getVirtualMachineByName(i.cfg, ctx, c, nodeName)
if err != nil {
return nil, err
}
@ -476,12 +483,22 @@ func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("unimplemented")
}

func (i *Instances) CurrentNodeName(hostname string) (string, error) {
return i.localInstanceID, nil
func (i *Instances) CurrentNodeName(hostname string) (k8stypes.NodeName, error) {
return k8stypes.NodeName(i.localInstanceID), nil
}

// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (i *Instances) ExternalID(name string) (string, error) {
// nodeNameToVMName maps a NodeName to the vmware infrastructure name
func nodeNameToVMName(nodeName k8stypes.NodeName) string {
return string(nodeName)
}

// vmNameToNodeName maps a vmware infrastructure name to a NodeName
func vmNameToNodeName(vmName string) k8stypes.NodeName {
return k8stypes.NodeName(vmName)
}

// ExternalID returns the cloud provider ID of the node with the specified Name (deprecated).
func (i *Instances) ExternalID(nodeName k8stypes.NodeName) (string, error) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -493,7 +510,7 @@ func (i *Instances) ExternalID(name string) (string, error) {
}
defer c.Logout(ctx)

vm, err := getVirtualMachineByName(i.cfg, ctx, c, name)
vm, err := getVirtualMachineByName(i.cfg, ctx, c, nodeName)
if err != nil {
if _, ok := err.(*find.NotFoundError); ok {
return "", cloudprovider.InstanceNotFound
@ -512,16 +529,16 @@ func (i *Instances) ExternalID(name string) (string, error) {
}

if mvm.Summary.Config.Template == false {
glog.Warningf("VM %s, is not in %s state", name, ActivePowerState)
glog.Warningf("VM %s, is not in %s state", nodeName, ActivePowerState)
} else {
glog.Warningf("VM %s, is a template", name)
glog.Warningf("VM %s, is a template", nodeName)
}

return "", cloudprovider.InstanceNotFound
}

// InstanceID returns the cloud provider ID of the specified instance.
func (i *Instances) InstanceID(name string) (string, error) {
// InstanceID returns the cloud provider ID of the node with the specified Name.
func (i *Instances) InstanceID(nodeName k8stypes.NodeName) (string, error) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -533,7 +550,7 @@ func (i *Instances) InstanceID(name string) (string, error) {
}
defer c.Logout(ctx)

vm, err := getVirtualMachineByName(i.cfg, ctx, c, name)
vm, err := getVirtualMachineByName(i.cfg, ctx, c, nodeName)
if err != nil {
if _, ok := err.(*find.NotFoundError); ok {
return "", cloudprovider.InstanceNotFound
@ -552,15 +569,15 @@ func (i *Instances) InstanceID(name string) (string, error) {
}

if mvm.Summary.Config.Template == false {
glog.Warningf("VM %s, is not in %s state", name, ActivePowerState)
glog.Warningf("VM %s, is not in %s state", nodeName, ActivePowerState)
} else {
glog.Warningf("VM %s, is a template", name)
glog.Warningf("VM %s, is a template", nodeName)
}

return "", cloudprovider.InstanceNotFound
}

func (i *Instances) InstanceType(name string) (string, error) {
func (i *Instances) InstanceType(name k8stypes.NodeName) (string, error) {
return "", nil
}

@ -657,7 +674,7 @@ func cleanUpController(newSCSIController types.BaseVirtualDevice, vmDevices obje
}

// Attaches given virtual disk volume to the compute running kubelet.
func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string, diskUUID string, err error) {
func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -673,8 +690,9 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
var vSphereInstance string
if nodeName == "" {
vSphereInstance = vs.localInstanceID
nodeName = vmNameToNodeName(vSphereInstance)
} else {
vSphereInstance = nodeName
vSphereInstance = nodeNameToVMName(nodeName)
}

// Get VM device list
@ -790,7 +808,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
if newSCSICreated {
cleanUpController(newSCSIController, vmDevices, vm, ctx)
}
vs.DetachDisk(deviceName, vSphereInstance)
vs.DetachDisk(deviceName, nodeName)
return "", "", err
}

@ -872,7 +890,7 @@ func getAvailableSCSIController(scsiControllers []*types.VirtualController) *typ
}

// DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
func (vs *VSphere) DiskIsAttached(volPath string, nodeName string) (bool, error) {
func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

@ -884,15 +902,16 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName string) (bool, error)
}
defer c.Logout(ctx)

// Find virtual machine to attach disk to
// Find VM to detach disk from
var vSphereInstance string
if nodeName == "" {
vSphereInstance = vs.localInstanceID
nodeName = vmNameToNodeName(vSphereInstance)
} else {
vSphereInstance = nodeName
vSphereInstance = nodeNameToVMName(nodeName)
}

nodeExist, err := vs.NodeExists(c, vSphereInstance)
nodeExist, err := vs.NodeExists(c, nodeName)

if err != nil {
glog.Errorf("Failed to check whether node exist. err: %s.", err)
@ -1043,7 +1062,7 @@ func getVirtualDiskID(volPath string, vmDevices object.VirtualDeviceList, dc *ob
}

// DetachDisk detaches given virtual disk volume from the compute running kubelet.
func (vs *VSphere) DetachDisk(volPath string, nodeName string) error {
func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -1055,15 +1074,16 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName string) error {
}
defer c.Logout(ctx)

// Find VM to detach disk from
// Find virtual machine to attach disk to
var vSphereInstance string
if nodeName == "" {
vSphereInstance = vs.localInstanceID
nodeName = vmNameToNodeName(vSphereInstance)
} else {
vSphereInstance = nodeName
vSphereInstance = nodeNameToVMName(nodeName)
}

nodeExist, err := vs.NodeExists(c, vSphereInstance)
nodeExist, err := vs.NodeExists(c, nodeName)

if err != nil {
glog.Errorf("Failed to check whether node exist. err: %s.", err)
@ -1073,7 +1093,7 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName string) error {
if !nodeExist {
glog.Warningf(
"Node %q does not exist. DetachDisk will assume vmdk %q is not attached to it.",
vSphereInstance,
nodeName,
volPath)
return nil
}
@ -1214,8 +1234,7 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error {

// NodeExists checks if the node with given nodeName exist.
// Returns false if VM doesn't exist or VM is in powerOff state.
func (vs *VSphere) NodeExists(c *govmomi.Client, nodeName string) (bool, error) {

func (vs *VSphere) NodeExists(c *govmomi.Client, nodeName k8stypes.NodeName) (bool, error) {
if nodeName == "" {
return false, nil
}
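The vSphere disk operations keep the "empty name means the VM running this kubelet" convention, but now normalize to a typed name up front so the rest of the call chain only sees a NodeName. A sketch of that normalization, with the local instance ID as a hypothetical value and a local NodeName stand-in for k8stypes.NodeName:

package main

import "fmt"

type NodeName string // stand-in for k8stypes.NodeName

func vmNameToNodeName(vmName string) NodeName  { return NodeName(vmName) }
func nodeNameToVMName(nodeName NodeName) string { return string(nodeName) }

// resolveTarget mirrors the fallback in AttachDisk/DetachDisk/DiskIsAttached:
// an empty NodeName selects the local VM (localInstanceID is assumed here).
func resolveTarget(nodeName NodeName, localInstanceID string) (NodeName, string) {
	var vSphereInstance string
	if nodeName == "" {
		vSphereInstance = localInstanceID
		nodeName = vmNameToNodeName(vSphereInstance)
	} else {
		vSphereInstance = nodeNameToVMName(nodeName)
	}
	return nodeName, vSphereInstance
}

func main() {
	fmt.Println(resolveTarget("", "vm-local-42"))
	fmt.Println(resolveTarget("worker-7", "vm-local-42"))
}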
@ -25,6 +25,7 @@ import (

"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/rand"
)

@ -186,7 +187,7 @@ func TestInstances(t *testing.T) {
}
t.Logf("Found ExternalID(%s) = %s\n", srvs[0], externalId)

nonExistingVM := rand.String(15)
nonExistingVM := types.NodeName(rand.String(15))
externalId, err = i.ExternalID(nonExistingVM)
if err == cloudprovider.InstanceNotFound {
t.Logf("VM %s was not found as expected\n", nonExistingVM)
@ -244,7 +244,7 @@ func nodeRunningOutdatedKubelet(node *api.Node) bool {
return false
}

func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName string) (bool, error) {
func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.NodeName) (bool, error) {
instances, ok := cloud.Instances()
if !ok {
return false, fmt.Errorf("%v", ErrCloudInstance)
@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/metrics"
utilnode "k8s.io/kubernetes/pkg/util/node"
@ -147,7 +148,7 @@ type NodeController struct {
cidrAllocator CIDRAllocator

forcefullyDeletePod func(*api.Pod) error
nodeExistsInCloudProvider func(string) (bool, error)
nodeExistsInCloudProvider func(types.NodeName) (bool, error)
computeZoneStateFunc func(nodeConditions []*api.NodeCondition) (int, zoneState)
enterPartialDisruptionFunc func(nodeNum int) float32
enterFullDisruptionFunc func(nodeNum int) float32
@ -229,7 +230,7 @@ func NewNodeController(
serviceCIDR: serviceCIDR,
allocateNodeCIDRs: allocateNodeCIDRs,
forcefullyDeletePod: func(p *api.Pod) error { return forcefullyDeletePod(kubeClient, p) },
nodeExistsInCloudProvider: func(nodeName string) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
evictionLimiterQPS: evictionLimiterQPS,
secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS,
largeClusterThreshold: largeClusterThreshold,
@ -576,7 +577,7 @@ func (nc *NodeController) monitorNodeStatus() error {
// Check with the cloud provider to see if the node still exists. If it
// doesn't, delete the node immediately.
if currentReadyCondition.Status != api.ConditionTrue && nc.cloud != nil {
exists, err := nc.nodeExistsInCloudProvider(node.Name)
exists, err := nc.nodeExistsInCloudProvider(types.NodeName(node.Name))
if err != nil {
glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err)
continue
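NodeController stores the existence check as a closure, which keeps the cloud provider injectable in tests; with the typed signature the test fake has to speak NodeName too, as the TestCloudProviderNoRateLimit change below shows. A small sketch of that wiring, with hypothetical names and a local NodeName stand-in:

package main

import "fmt"

type NodeName string // stand-in for types.NodeName

// controller holds the existence check as a field, the way NodeController does,
// so tests can swap in a fake without talking to a real cloud provider.
type controller struct {
	nodeExistsInCloudProvider func(NodeName) (bool, error)
}

func main() {
	c := &controller{
		// A test fake analogous to the one used in TestCloudProviderNoRateLimit.
		nodeExistsInCloudProvider: func(nodeName NodeName) (bool, error) {
			return false, nil
		},
	}
	exists, err := c.nodeExistsInCloudProvider(NodeName("node-0"))
	fmt.Println(exists, err)
}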
@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/diff"
"k8s.io/kubernetes/pkg/util/wait"
)
@ -1078,7 +1079,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
testNodeMonitorPeriod, nil, nil, 0, false)
nodeController.cloud = &fakecloud.FakeCloud{}
nodeController.now = func() unversioned.Time { return unversioned.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
nodeController.nodeExistsInCloudProvider = func(nodeName string) (bool, error) {
nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
return false, nil
}
// monitorNodeStatus should allow this node to be immediately deleted
@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/metrics"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/wait"
@ -117,11 +118,11 @@ func (rc *RouteController) reconcileNodeRoutes() error {

func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.Route) error {
// nodeCIDRs maps nodeName->nodeCIDR
nodeCIDRs := make(map[string]string)
// routeMap maps routeTargetInstance->route
routeMap := make(map[string]*cloudprovider.Route)
nodeCIDRs := make(map[types.NodeName]string)
// routeMap maps routeTargetNode->route
routeMap := make(map[types.NodeName]*cloudprovider.Route)
for _, route := range routes {
routeMap[route.TargetInstance] = route
routeMap[route.TargetNode] = route
}

wg := sync.WaitGroup{}
@ -132,17 +133,18 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R
if node.Spec.PodCIDR == "" {
continue
}
nodeName := types.NodeName(node.Name)
// Check if we have a route for this node w/ the correct CIDR.
r := routeMap[node.Name]
r := routeMap[nodeName]
if r == nil || r.DestinationCIDR != node.Spec.PodCIDR {
// If not, create the route.
route := &cloudprovider.Route{
TargetInstance: node.Name,
TargetNode: nodeName,
DestinationCIDR: node.Spec.PodCIDR,
}
nameHint := string(node.UID)
wg.Add(1)
go func(nodeName string, nameHint string, route *cloudprovider.Route) {
go func(nodeName types.NodeName, nameHint string, route *cloudprovider.Route) {
defer wg.Done()
for i := 0; i < maxRetries; i++ {
startTime := time.Now()
@ -161,20 +163,20 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R
return
}
}
}(node.Name, nameHint, route)
}(nodeName, nameHint, route)
} else {
// Update condition only if it doesn't reflect the current state.
_, condition := api.GetNodeCondition(&node.Status, api.NodeNetworkUnavailable)
if condition == nil || condition.Status != api.ConditionFalse {
rc.updateNetworkingCondition(node.Name, true)
rc.updateNetworkingCondition(types.NodeName(node.Name), true)
}
}
nodeCIDRs[node.Name] = node.Spec.PodCIDR
nodeCIDRs[nodeName] = node.Spec.PodCIDR
}
for _, route := range routes {
if rc.isResponsibleForRoute(route) {
// Check if this route applies to a node we know about & has correct CIDR.
if nodeCIDRs[route.TargetInstance] != route.DestinationCIDR {
if nodeCIDRs[route.TargetNode] != route.DestinationCIDR {
wg.Add(1)
// Delete the route.
go func(route *cloudprovider.Route, startTime time.Time) {
@ -194,7 +196,7 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R
return nil
}

func (rc *RouteController) updateNetworkingCondition(nodeName string, routeCreated bool) error {
func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, routeCreated bool) error {
var err error
for i := 0; i < updateNodeStatusMaxRetries; i++ {
// Patch could also fail, even though the chance is very slim. So we still do
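With routes keyed by the typed name, the reconcile loop can compare a node's desired pod CIDR against the existing route without mixing name spaces. A compact sketch of the two maps used above, with a minimal Route struct standing in for cloudprovider.Route:

package main

import "fmt"

type NodeName string // stand-in for types.NodeName

// Route is a minimal stand-in for cloudprovider.Route.
type Route struct {
	Name            string
	TargetNode      NodeName
	DestinationCIDR string
}

func main() {
	routes := []*Route{{Name: "r1", TargetNode: "node-a", DestinationCIDR: "10.244.1.0/24"}}

	// routeMap maps routeTargetNode -> route and nodeCIDRs maps nodeName -> CIDR,
	// mirroring the reconcile loop above.
	routeMap := make(map[NodeName]*Route)
	for _, r := range routes {
		routeMap[r.TargetNode] = r
	}
	nodeCIDRs := map[NodeName]string{"node-a": "10.244.1.0/24", "node-b": "10.244.2.0/24"}

	for name, cidr := range nodeCIDRs {
		if r, ok := routeMap[name]; !ok || r.DestinationCIDR != cidr {
			fmt.Printf("node %s needs a route for %s\n", name, cidr)
		}
	}
}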
@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/cloudprovider"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/types"
)

func TestIsResponsibleForRoute(t *testing.T) {
@ -58,7 +59,7 @@ func TestIsResponsibleForRoute(t *testing.T) {
rc := New(nil, nil, myClusterName, cidr)
route := &cloudprovider.Route{
Name: testCase.routeName,
TargetInstance: "doesnt-matter-for-this-test",
TargetNode: types.NodeName("doesnt-matter-for-this-test"),
DestinationCIDR: testCase.routeCIDR,
}
if resp := rc.isResponsibleForRoute(route); resp != testCase.expectedResponsible {
@ -237,7 +237,7 @@ func (adc *attachDetachController) nodeAdd(obj interface{}) {
|
||||
return
|
||||
}
|
||||
|
||||
nodeName := node.Name
|
||||
nodeName := types.NodeName(node.Name)
|
||||
if _, exists := node.Annotations[volumehelper.ControllerManagedAttachAnnotation]; exists {
|
||||
// Node specifies annotation indicating it should be managed by attach
|
||||
// detach controller. Add it to desired state of world.
|
||||
@ -258,7 +258,7 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) {
|
||||
return
|
||||
}
|
||||
|
||||
nodeName := node.Name
|
||||
nodeName := types.NodeName(node.Name)
|
||||
if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil {
|
||||
glog.V(10).Infof("%v", err)
|
||||
}
|
||||
@ -278,7 +278,9 @@ func (adc *attachDetachController) processPodVolumes(
|
||||
return
|
||||
}
|
||||
|
||||
if !adc.desiredStateOfWorld.NodeExists(pod.Spec.NodeName) {
|
||||
nodeName := types.NodeName(pod.Spec.NodeName)
|
||||
|
||||
if !adc.desiredStateOfWorld.NodeExists(nodeName) {
|
||||
// If the node the pod is scheduled to does not exist in the desired
|
||||
// state of the world data structure, that indicates the node is not
|
||||
// yet managed by the controller. Therefore, ignore the pod.
|
||||
@ -288,7 +290,7 @@ func (adc *attachDetachController) processPodVolumes(
|
||||
"Skipping processing of pod %q/%q: it is scheduled to node %q which is not managed by the controller.",
|
||||
pod.Namespace,
|
||||
pod.Name,
|
||||
pod.Spec.NodeName)
|
||||
nodeName)
|
||||
return
|
||||
}
|
||||
|
||||
@ -321,7 +323,7 @@ func (adc *attachDetachController) processPodVolumes(
if addVolumes {
// Add volume to desired state of world
_, err := adc.desiredStateOfWorld.AddPod(
uniquePodName, pod, volumeSpec, pod.Spec.NodeName)
uniquePodName, pod, volumeSpec, nodeName)
if err != nil {
glog.V(10).Infof(
"Failed to add volume %q for pod %q/%q to desiredStateOfWorld. %v",
@ -345,7 +347,7 @@ func (adc *attachDetachController) processPodVolumes(
continue
}
adc.desiredStateOfWorld.DeletePod(
uniquePodName, uniqueVolumeName, pod.Spec.NodeName)
uniquePodName, uniqueVolumeName, nodeName)
}
}

@ -516,7 +518,7 @@ func (adc *attachDetachController) getPVSpecFromCache(
// corresponding volume in the actual state of the world to indicate that it is
// mounted.
func (adc *attachDetachController) processVolumesInUse(
nodeName string, volumesInUse []api.UniqueVolumeName) {
nodeName types.NodeName, volumesInUse []api.UniqueVolumeName) {
glog.V(4).Infof("processVolumesInUse for node %q", nodeName)
for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) {
mounted := false

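A point worth noting in the processPodVolumes hunks above: pod.Spec.NodeName remains a plain string in the API object, so the controller converts it to the typed name once, up front, and every later call (NodeExists, AddPod, DeletePod) works with the converted value. A small sketch of that pattern, with a hypothetical type standing in for the controller's desired-state cache:

    package main

    import "fmt"

    type NodeName string

    // desiredState is a hypothetical stand-in for the controller's desiredStateOfWorld cache.
    type desiredState struct{ nodes map[NodeName]bool }

    func (d *desiredState) NodeExists(n NodeName) bool { return d.nodes[n] }

    func main() {
        podSpecNodeName := "node-a" // pod.Spec.NodeName is still a plain string in the API object
        d := &desiredState{nodes: map[NodeName]bool{"node-a": true}}

        nodeName := NodeName(podSpecNodeName) // convert once at the boundary
        if !d.NodeExists(nodeName) {          // every later lookup uses the typed value
            fmt.Println("node not managed; skipping pod")
            return
        }
        fmt.Println("node is managed:", nodeName)
    }
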
@ -29,6 +29,7 @@ import (
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
|
||||
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
|
||||
@ -55,7 +56,7 @@ type ActualStateOfWorld interface {
|
||||
// added.
|
||||
// If no node with the name nodeName exists in list of attached nodes for
|
||||
// the specified volume, the node is added.
|
||||
AddVolumeNode(volumeSpec *volume.Spec, nodeName string, devicePath string) (api.UniqueVolumeName, error)
|
||||
AddVolumeNode(volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (api.UniqueVolumeName, error)
|
||||
|
||||
// SetVolumeMountedByNode sets the MountedByNode value for the given volume
|
||||
// and node. When set to true this value indicates the volume is mounted by
|
||||
@ -64,23 +65,23 @@ type ActualStateOfWorld interface {
|
||||
// returned.
|
||||
// If no node with the name nodeName exists in list of attached nodes for
|
||||
// the specified volume, an error is returned.
|
||||
SetVolumeMountedByNode(volumeName api.UniqueVolumeName, nodeName string, mounted bool) error
|
||||
SetVolumeMountedByNode(volumeName api.UniqueVolumeName, nodeName types.NodeName, mounted bool) error
|
||||
|
||||
// SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified
|
||||
// node to true indicating the AttachedVolume field in the Node's Status
|
||||
// object needs to be updated by the node updater again.
|
||||
// If the specified node does not exist in the nodesToUpdateStatusFor list,
|
||||
// log the error and return
|
||||
SetNodeStatusUpdateNeeded(nodeName string)
|
||||
SetNodeStatusUpdateNeeded(nodeName types.NodeName)
|
||||
|
||||
// ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach
|
||||
// request any more for the volume
|
||||
ResetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName string)
|
||||
ResetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName types.NodeName)
|
||||
|
||||
// SetDetachRequestTime sets the detachRequestedTime to the current time if there is no
// previous request (the previous detachRequestedTime is zero) and returns the time elapsed
// since the last request
|
||||
SetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName string) (time.Duration, error)
|
||||
SetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error)
|
||||
|
||||
// DeleteVolumeNode removes the given volume and node from the underlying
|
||||
// store indicating the specified volume is no longer attached to the
|
||||
@ -88,12 +89,12 @@ type ActualStateOfWorld interface {
|
||||
// If the volume/node combo does not exist, this is a no-op.
|
||||
// If after deleting the node, the specified volume contains no other child
|
||||
// nodes, the volume is also deleted.
|
||||
DeleteVolumeNode(volumeName api.UniqueVolumeName, nodeName string)
|
||||
DeleteVolumeNode(volumeName api.UniqueVolumeName, nodeName types.NodeName)
|
||||
|
||||
// VolumeNodeExists returns true if the specified volume/node combo exists
|
||||
// in the underlying store indicating the specified volume is attached to
|
||||
// the specified node.
|
||||
VolumeNodeExists(volumeName api.UniqueVolumeName, nodeName string) bool
|
||||
VolumeNodeExists(volumeName api.UniqueVolumeName, nodeName types.NodeName) bool
|
||||
|
||||
// GetAttachedVolumes generates and returns a list of volumes/node pairs
|
||||
// reflecting which volumes are attached to which nodes based on the
|
||||
@ -103,7 +104,7 @@ type ActualStateOfWorld interface {
|
||||
// GetAttachedVolumes generates and returns a list of volumes attached to
|
||||
// the specified node reflecting which volumes are attached to that node
|
||||
// based on the current actual state of the world.
|
||||
GetAttachedVolumesForNode(nodeName string) []AttachedVolume
|
||||
GetAttachedVolumesForNode(nodeName types.NodeName) []AttachedVolume
|
||||
|
||||
// GetVolumesToReportAttached returns a map containing the set of nodes for
|
||||
// which the VolumesAttached Status field in the Node API object should be
|
||||
@ -112,7 +113,7 @@ type ActualStateOfWorld interface {
|
||||
// this may differ from the actual list of attached volumes for the node
|
||||
// since volumes should be removed from this list as soon a detach operation
|
||||
// is considered, before the detach operation is triggered).
|
||||
GetVolumesToReportAttached() map[string][]api.AttachedVolume
|
||||
GetVolumesToReportAttached() map[types.NodeName][]api.AttachedVolume
|
||||
}
|
||||
|
||||
// AttachedVolume represents a volume that is attached to a node.
|
||||
@ -136,7 +137,7 @@ type AttachedVolume struct {
|
||||
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
|
||||
return &actualStateOfWorld{
|
||||
attachedVolumes: make(map[api.UniqueVolumeName]attachedVolume),
|
||||
nodesToUpdateStatusFor: make(map[string]nodeToUpdateStatusFor),
|
||||
nodesToUpdateStatusFor: make(map[types.NodeName]nodeToUpdateStatusFor),
|
||||
volumePluginMgr: volumePluginMgr,
|
||||
}
|
||||
}
|
||||
@ -152,7 +153,7 @@ type actualStateOfWorld struct {
|
||||
// update the VolumesAttached Status field. The key in this map is the name
|
||||
// of the node and the value is an object containing more information about
|
||||
// the node (including the list of volumes to report attached).
|
||||
nodesToUpdateStatusFor map[string]nodeToUpdateStatusFor
|
||||
nodesToUpdateStatusFor map[types.NodeName]nodeToUpdateStatusFor
|
||||
|
||||
// volumePluginMgr is the volume plugin manager used to create volume
|
||||
// plugin objects.
|
||||
@ -176,7 +177,7 @@ type attachedVolume struct {
|
||||
// successfully been attached to. The key in this map is the name of the
|
||||
// node and the value is a node object containing more information about
|
||||
// the node.
|
||||
nodesAttachedTo map[string]nodeAttachedTo
|
||||
nodesAttachedTo map[types.NodeName]nodeAttachedTo
|
||||
|
||||
// devicePath contains the path on the node where the volume is attached
|
||||
devicePath string
|
||||
@ -185,7 +186,7 @@ type attachedVolume struct {
|
||||
// The nodeAttachedTo object represents a node that has volumes attached to it.
|
||||
type nodeAttachedTo struct {
|
||||
// nodeName contains the name of this node.
|
||||
nodeName string
|
||||
nodeName types.NodeName
|
||||
|
||||
// mountedByNode indicates that this node/volume combo is mounted by the
|
||||
// node and is unsafe to detach
|
||||
@ -206,7 +207,7 @@ type nodeAttachedTo struct {
|
||||
// attached in the Node's Status API object.
|
||||
type nodeToUpdateStatusFor struct {
|
||||
// nodeName contains the name of this node.
|
||||
nodeName string
|
||||
nodeName types.NodeName
|
||||
|
||||
// statusUpdateNeeded indicates that the value of the VolumesAttached field
|
||||
// in the Node's Status API object should be updated. This should be set to
|
||||
@ -224,32 +225,32 @@ type nodeToUpdateStatusFor struct {
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) MarkVolumeAsAttached(
|
||||
_ api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName string, devicePath string) error {
|
||||
_ api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error {
|
||||
_, err := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
return err
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) MarkVolumeAsDetached(
|
||||
volumeName api.UniqueVolumeName, nodeName string) {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
|
||||
asw.DeleteVolumeNode(volumeName, nodeName)
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(
|
||||
volumeName api.UniqueVolumeName, nodeName string) error {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) error {
|
||||
asw.Lock()
|
||||
defer asw.Unlock()
|
||||
return asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(
|
||||
volumeName api.UniqueVolumeName, nodeName string) {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
|
||||
asw.Lock()
|
||||
defer asw.Unlock()
|
||||
asw.addVolumeToReportAsAttached(volumeName, nodeName)
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) AddVolumeNode(
|
||||
volumeSpec *volume.Spec, nodeName string, devicePath string) (api.UniqueVolumeName, error) {
|
||||
volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (api.UniqueVolumeName, error) {
|
||||
asw.Lock()
|
||||
defer asw.Unlock()
|
||||
|
||||
@ -275,7 +276,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
|
||||
volumeObj = attachedVolume{
|
||||
volumeName: volumeName,
|
||||
spec: volumeSpec,
|
||||
nodesAttachedTo: make(map[string]nodeAttachedTo),
|
||||
nodesAttachedTo: make(map[types.NodeName]nodeAttachedTo),
|
||||
devicePath: devicePath,
|
||||
}
|
||||
asw.attachedVolumes[volumeName] = volumeObj
|
||||
@ -301,7 +302,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) SetVolumeMountedByNode(
|
||||
volumeName api.UniqueVolumeName, nodeName string, mounted bool) error {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName, mounted bool) error {
|
||||
asw.Lock()
|
||||
defer asw.Unlock()
|
||||
|
||||
@ -330,7 +331,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) ResetDetachRequestTime(
|
||||
volumeName api.UniqueVolumeName, nodeName string) {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
|
||||
asw.Lock()
|
||||
defer asw.Unlock()
|
||||
|
||||
@ -344,7 +345,7 @@ func (asw *actualStateOfWorld) ResetDetachRequestTime(
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) SetDetachRequestTime(
|
||||
volumeName api.UniqueVolumeName, nodeName string) (time.Duration, error) {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) {
|
||||
asw.Lock()
|
||||
defer asw.Unlock()
|
||||
|
||||
@ -366,7 +367,7 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(
|
||||
// Get the volume and node object from actual state of world
|
||||
// This is an internal function and caller should acquire and release the lock
|
||||
func (asw *actualStateOfWorld) getNodeAndVolume(
|
||||
volumeName api.UniqueVolumeName, nodeName string) (attachedVolume, nodeAttachedTo, error) {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) (attachedVolume, nodeAttachedTo, error) {
|
||||
|
||||
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
|
||||
if volumeExists {
|
||||
@ -384,7 +385,7 @@ func (asw *actualStateOfWorld) getNodeAndVolume(
|
||||
// Remove the volumeName from the node's volumesToReportAsAttached list
|
||||
// This is an internal function and caller should acquire and release the lock
|
||||
func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
|
||||
volumeName api.UniqueVolumeName, nodeName string) error {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) error {
|
||||
|
||||
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
|
||||
if nodeToUpdateExists {
|
||||
@ -406,7 +407,7 @@ func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
|
||||
// Add the volumeName to the node's volumesToReportAsAttached list
|
||||
// This is an internal function and caller should acquire and release the lock
|
||||
func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
|
||||
volumeName api.UniqueVolumeName, nodeName string) {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
|
||||
// In case the volume/node entry is no longer in attachedVolume list, skip the rest
|
||||
if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil {
|
||||
glog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
|
||||
@ -437,7 +438,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
|
||||
// needs to be updated again by the node status updater.
|
||||
// If the specified node does not exist in the nodesToUpdateStatusFor list, log the error and return
|
||||
// This is an internal function and caller should acquire and release the lock
|
||||
func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName string, needed bool) {
|
||||
func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeName, needed bool) {
|
||||
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
|
||||
if !nodeToUpdateExists {
|
||||
// should not happen
|
||||
@ -451,14 +452,14 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName string, nee
|
||||
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName string) {
|
||||
func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName) {
|
||||
asw.Lock()
|
||||
defer asw.Unlock()
|
||||
asw.updateNodeStatusUpdateNeeded(nodeName, true)
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) DeleteVolumeNode(
|
||||
volumeName api.UniqueVolumeName, nodeName string) {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
|
||||
asw.Lock()
|
||||
defer asw.Unlock()
|
||||
|
||||
@ -481,7 +482,7 @@ func (asw *actualStateOfWorld) DeleteVolumeNode(
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) VolumeNodeExists(
|
||||
volumeName api.UniqueVolumeName, nodeName string) bool {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) bool {
|
||||
asw.RLock()
|
||||
defer asw.RUnlock()
|
||||
|
||||
@ -512,7 +513,7 @@ func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) GetAttachedVolumesForNode(
|
||||
nodeName string) []AttachedVolume {
|
||||
nodeName types.NodeName) []AttachedVolume {
|
||||
asw.RLock()
|
||||
defer asw.RUnlock()
|
||||
|
||||
@ -531,11 +532,11 @@ func (asw *actualStateOfWorld) GetAttachedVolumesForNode(
return attachedVolumes
}

func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[string][]api.AttachedVolume {
func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]api.AttachedVolume {
asw.RLock()
defer asw.RUnlock()

volumesToReportAttached := make(map[string][]api.AttachedVolume)
volumesToReportAttached := make(map[types.NodeName][]api.AttachedVolume)
for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
if nodeToUpdateObj.statusUpdateNeeded {
attachedVolumes := make(

@ -22,6 +22,7 @@ import (
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||
|
||||
@ -34,7 +35,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) {
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
|
||||
// Act
|
||||
@ -66,8 +67,8 @@ func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
node1Name := "node1-name"
|
||||
node2Name := "node2-name"
|
||||
node1Name := types.NodeName("node1-name")
|
||||
node2Name := types.NodeName("node2-name")
|
||||
devicePath := "fake/device/path"
|
||||
|
||||
// Act
|
||||
@ -116,7 +117,7 @@ func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
|
||||
// Act
|
||||
@ -160,7 +161,7 @@ func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -189,7 +190,7 @@ func Test_DeleteVolumeNode_Positive_VolumeDoesntExistNodeDoesntExist(t *testing.
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
|
||||
// Act
|
||||
asw.DeleteVolumeNode(volumeName, nodeName)
|
||||
@ -216,8 +217,8 @@ func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
node1Name := "node1-name"
|
||||
node2Name := "node2-name"
|
||||
node1Name := types.NodeName("node1-name")
|
||||
node2Name := types.NodeName("node2-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath)
|
||||
if add1Err != nil {
|
||||
@ -265,7 +266,7 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -297,8 +298,8 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
node1Name := "node1-name"
|
||||
node2Name := "node2-name"
|
||||
node1Name := types.NodeName("node1-name")
|
||||
node2Name := types.NodeName("node2-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, node1Name, devicePath)
|
||||
if addErr != nil {
|
||||
@ -328,7 +329,7 @@ func Test_VolumeNodeExists_Positive_VolumeAndNodeDontExist(t *testing.T) {
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
|
||||
// Act
|
||||
volumeNodeComboExists := asw.VolumeNodeExists(volumeName, nodeName)
|
||||
@ -369,7 +370,7 @@ func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -396,7 +397,7 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volume1Name := api.UniqueVolumeName("volume1-name")
|
||||
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
|
||||
node1Name := "node1-name"
|
||||
node1Name := types.NodeName("node1-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName1, add1Err := asw.AddVolumeNode(volume1Spec, node1Name, devicePath)
|
||||
if add1Err != nil {
|
||||
@ -404,7 +405,7 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
|
||||
}
|
||||
volume2Name := api.UniqueVolumeName("volume2-name")
|
||||
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
|
||||
node2Name := "node2-name"
|
||||
node2Name := types.NodeName("node2-name")
|
||||
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath)
|
||||
if add2Err != nil {
|
||||
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
|
||||
@ -431,13 +432,13 @@ func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
node1Name := "node1-name"
|
||||
node1Name := types.NodeName("node1-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath)
|
||||
if add1Err != nil {
|
||||
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
|
||||
}
|
||||
node2Name := "node2-name"
|
||||
node2Name := types.NodeName("node2-name")
|
||||
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name, devicePath)
|
||||
if add2Err != nil {
|
||||
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
|
||||
@ -470,7 +471,7 @@ func Test_SetVolumeMountedByNode_Positive_Set(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -497,7 +498,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSet(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -533,7 +534,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -566,7 +567,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetAddVolumeNodeNotRes
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -607,7 +608,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -655,7 +656,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Set(t *testing.T) {
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
devicePath := "fake/device/path"
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
|
||||
@ -681,7 +682,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Marked(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -717,7 +718,7 @@ func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -760,7 +761,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -803,7 +804,7 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -836,7 +837,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -879,7 +880,7 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -924,7 +925,7 @@ func Test_SetDetachRequestTime_Positive(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -955,7 +956,7 @@ func verifyAttachedVolume(
|
||||
attachedVolumes []AttachedVolume,
|
||||
expectedVolumeName api.UniqueVolumeName,
|
||||
expectedVolumeSpecName string,
|
||||
expectedNodeName string,
|
||||
expectedNodeName types.NodeName,
|
||||
expectedMountedByNode,
|
||||
expectNonZeroDetachRequestedTime bool) {
|
||||
for _, attachedVolume := range attachedVolumes {
|
||||
@ -981,7 +982,7 @@ func Test_GetAttachedVolumesForNode_Positive_NoVolumesOrNodes(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
node := "random"
|
||||
node := types.NodeName("random")
|
||||
|
||||
// Act
|
||||
attachedVolumes := asw.GetAttachedVolumesForNode(node)
|
||||
@ -998,7 +999,7 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeOneNode(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
|
||||
if addErr != nil {
|
||||
@ -1022,7 +1023,7 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volume1Name := api.UniqueVolumeName("volume1-name")
|
||||
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
|
||||
node1Name := "node1-name"
|
||||
node1Name := types.NodeName("node1-name")
|
||||
devicePath := "fake/device/path"
|
||||
_, add1Err := asw.AddVolumeNode(volume1Spec, node1Name, devicePath)
|
||||
if add1Err != nil {
|
||||
@ -1030,7 +1031,7 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
|
||||
}
|
||||
volume2Name := api.UniqueVolumeName("volume2-name")
|
||||
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
|
||||
node2Name := "node2-name"
|
||||
node2Name := types.NodeName("node2-name")
|
||||
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath)
|
||||
if add2Err != nil {
|
||||
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
|
||||
@ -1053,13 +1054,13 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) {
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
node1Name := "node1-name"
|
||||
node1Name := types.NodeName("node1-name")
|
||||
devicePath := "fake/device/path"
|
||||
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath)
|
||||
if add1Err != nil {
|
||||
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
|
||||
}
|
||||
node2Name := "node2-name"
|
||||
node2Name := types.NodeName("node2-name")
|
||||
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name, devicePath)
|
||||
if add2Err != nil {
|
||||
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
k8stypes "k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
|
||||
"k8s.io/kubernetes/pkg/volume/util/types"
|
||||
@ -45,7 +46,7 @@ type DesiredStateOfWorld interface {
|
||||
// AddNode adds the given node to the list of nodes managed by the attach/
|
||||
// detach controller.
|
||||
// If the node already exists this is a no-op.
|
||||
AddNode(nodeName string)
|
||||
AddNode(nodeName k8stypes.NodeName)
|
||||
|
||||
// AddPod adds the given pod to the list of pods that reference the
|
||||
// specified volume and is scheduled to the specified node.
|
||||
@ -57,13 +58,13 @@ type DesiredStateOfWorld interface {
|
||||
// should be attached to the specified node, the volume is implicitly added.
|
||||
// If no node with the name nodeName exists in list of nodes managed by the
|
||||
// attach/detach controller, an error is returned.
|
||||
AddPod(podName types.UniquePodName, pod *api.Pod, volumeSpec *volume.Spec, nodeName string) (api.UniqueVolumeName, error)
|
||||
AddPod(podName types.UniquePodName, pod *api.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (api.UniqueVolumeName, error)
|
||||
|
||||
// DeleteNode removes the given node from the list of nodes managed by the
|
||||
// attach/detach controller.
|
||||
// If the node does not exist this is a no-op.
|
||||
// If the node exists but has 1 or more child volumes, an error is returned.
|
||||
DeleteNode(nodeName string) error
|
||||
DeleteNode(nodeName k8stypes.NodeName) error
|
||||
|
||||
// DeletePod removes the given pod from the list of pods that reference the
|
||||
// specified volume and are scheduled to the specified node.
|
||||
@ -75,16 +76,16 @@ type DesiredStateOfWorld interface {
|
||||
// volumes under the specified node, this is a no-op.
|
||||
// If after deleting the pod, the specified volume contains no other child
|
||||
// pods, the volume is also deleted.
|
||||
DeletePod(podName types.UniquePodName, volumeName api.UniqueVolumeName, nodeName string)
|
||||
DeletePod(podName types.UniquePodName, volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName)
|
||||
|
||||
// NodeExists returns true if the node with the specified name exists in
|
||||
// the list of nodes managed by the attach/detach controller.
|
||||
NodeExists(nodeName string) bool
|
||||
NodeExists(nodeName k8stypes.NodeName) bool
|
||||
|
||||
// VolumeExists returns true if the volume with the specified name exists
|
||||
// in the list of volumes that should be attached to the specified node by
|
||||
// the attach detach controller.
|
||||
VolumeExists(volumeName api.UniqueVolumeName, nodeName string) bool
|
||||
VolumeExists(volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName) bool
|
||||
|
||||
// GetVolumesToAttach generates and returns a list of volumes to attach
|
||||
// and the nodes they should be attached to based on the current desired
|
||||
@ -111,13 +112,13 @@ type PodToAdd struct {
|
||||
VolumeName api.UniqueVolumeName
|
||||
|
||||
// nodeName contains the name of this node.
|
||||
NodeName string
|
||||
NodeName k8stypes.NodeName
|
||||
}
|
||||
|
||||
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
|
||||
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
|
||||
return &desiredStateOfWorld{
|
||||
nodesManaged: make(map[string]nodeManaged),
|
||||
nodesManaged: make(map[k8stypes.NodeName]nodeManaged),
|
||||
volumePluginMgr: volumePluginMgr,
|
||||
}
|
||||
}
|
||||
@ -126,7 +127,7 @@ type desiredStateOfWorld struct {
|
||||
// nodesManaged is a map containing the set of nodes managed by the attach/
|
||||
// detach controller. The key in this map is the name of the node and the
|
||||
// value is a node object containing more information about the node.
|
||||
nodesManaged map[string]nodeManaged
|
||||
nodesManaged map[k8stypes.NodeName]nodeManaged
|
||||
// volumePluginMgr is the volume plugin manager used to create volume
|
||||
// plugin objects.
|
||||
volumePluginMgr *volume.VolumePluginMgr
|
||||
@ -137,7 +138,7 @@ type desiredStateOfWorld struct {
|
||||
// controller.
|
||||
type nodeManaged struct {
|
||||
// nodeName contains the name of this node.
|
||||
nodeName string
|
||||
nodeName k8stypes.NodeName
|
||||
|
||||
// volumesToAttach is a map containing the set of volumes that should be
|
||||
// attached to this node. The key in the map is the name of the volume and
|
||||
@ -172,7 +173,7 @@ type pod struct {
|
||||
podObj *api.Pod
|
||||
}
|
||||
|
||||
func (dsw *desiredStateOfWorld) AddNode(nodeName string) {
|
||||
func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName) {
|
||||
dsw.Lock()
|
||||
defer dsw.Unlock()
|
||||
|
||||
@ -188,7 +189,7 @@ func (dsw *desiredStateOfWorld) AddPod(
|
||||
podName types.UniquePodName,
|
||||
podToAdd *api.Pod,
|
||||
volumeSpec *volume.Spec,
|
||||
nodeName string) (api.UniqueVolumeName, error) {
|
||||
nodeName k8stypes.NodeName) (api.UniqueVolumeName, error) {
|
||||
dsw.Lock()
|
||||
defer dsw.Unlock()
|
||||
|
||||
@ -236,7 +237,7 @@ func (dsw *desiredStateOfWorld) AddPod(
|
||||
return volumeName, nil
|
||||
}
|
||||
|
||||
func (dsw *desiredStateOfWorld) DeleteNode(nodeName string) error {
|
||||
func (dsw *desiredStateOfWorld) DeleteNode(nodeName k8stypes.NodeName) error {
|
||||
dsw.Lock()
|
||||
defer dsw.Unlock()
|
||||
|
||||
@ -261,7 +262,7 @@ func (dsw *desiredStateOfWorld) DeleteNode(nodeName string) error {
|
||||
func (dsw *desiredStateOfWorld) DeletePod(
|
||||
podName types.UniquePodName,
|
||||
volumeName api.UniqueVolumeName,
|
||||
nodeName string) {
|
||||
nodeName k8stypes.NodeName) {
|
||||
dsw.Lock()
|
||||
defer dsw.Unlock()
|
||||
|
||||
@ -289,7 +290,7 @@ func (dsw *desiredStateOfWorld) DeletePod(
|
||||
}
|
||||
}
|
||||
|
||||
func (dsw *desiredStateOfWorld) NodeExists(nodeName string) bool {
|
||||
func (dsw *desiredStateOfWorld) NodeExists(nodeName k8stypes.NodeName) bool {
|
||||
dsw.RLock()
|
||||
defer dsw.RUnlock()
|
||||
|
||||
@ -298,7 +299,7 @@ func (dsw *desiredStateOfWorld) NodeExists(nodeName string) bool {
|
||||
}
|
||||
|
||||
func (dsw *desiredStateOfWorld) VolumeExists(
|
||||
volumeName api.UniqueVolumeName, nodeName string) bool {
|
||||
volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName) bool {
|
||||
dsw.RLock()
|
||||
defer dsw.RUnlock()
|
||||
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
|
||||
k8stypes "k8s.io/kubernetes/pkg/types"
|
||||
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
|
||||
"k8s.io/kubernetes/pkg/volume/util/types"
|
||||
)
|
||||
@ -31,7 +32,7 @@ func Test_AddNode_Positive_NewNode(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
|
||||
// Act
|
||||
dsw.AddNode(nodeName)
|
||||
@ -56,7 +57,7 @@ func Test_AddNode_Positive_ExistingNode(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
|
||||
// Act
|
||||
dsw.AddNode(nodeName)
|
||||
@ -92,7 +93,7 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeDoesntExist(t *testing.T) {
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
volumeExists := dsw.VolumeExists(volumeName, nodeName)
|
||||
if volumeExists {
|
||||
@ -140,7 +141,7 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeExists(t *testing.T) {
|
||||
pod2Name := "pod2-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
volumeExists := dsw.VolumeExists(volumeName, nodeName)
|
||||
if volumeExists {
|
||||
@ -213,7 +214,7 @@ func Test_AddPod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
|
||||
podName := "pod-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
volumeExists := dsw.VolumeExists(volumeName, nodeName)
|
||||
if volumeExists {
|
||||
@ -279,7 +280,7 @@ func Test_AddPod_Negative_NewPodNodeDoesntExistVolumeDoesntExist(t *testing.T) {
|
||||
podName := "pod-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
volumeExists := dsw.VolumeExists(volumeName, nodeName)
|
||||
if volumeExists {
|
||||
t.Fatalf(
|
||||
@ -317,7 +318,7 @@ func Test_DeleteNode_Positive_NodeExists(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
|
||||
// Act
|
||||
@ -345,7 +346,7 @@ func Test_DeleteNode_Positive_NodeDoesntExist(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
notAddedNodeName := "node-not-added-name"
|
||||
notAddedNodeName := k8stypes.NodeName("node-not-added-name")
|
||||
|
||||
// Act
|
||||
err := dsw.DeleteNode(notAddedNodeName)
|
||||
@ -373,7 +374,7 @@ func Test_DeleteNode_Negative_NodeExistsHasChildVolumes(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
podName := "pod-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
@ -417,7 +418,7 @@ func Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
|
||||
podName := "pod-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
|
||||
if podAddErr != nil {
|
||||
@ -465,7 +466,7 @@ func Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist(t *testing.T) {
|
||||
pod2Name := "pod2-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
generatedVolumeName1, pod1AddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volumeSpec, nodeName)
|
||||
if pod1AddErr != nil {
|
||||
@ -526,7 +527,7 @@ func Test_DeletePod_Positive_PodDoesNotExist(t *testing.T) {
|
||||
pod2Name := "pod2-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
generatedVolumeName, pod1AddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volumeSpec, nodeName)
|
||||
if pod1AddErr != nil {
|
||||
@ -574,7 +575,7 @@ func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
|
||||
podName := "pod-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
node1Name := "node1-name"
|
||||
node1Name := k8stypes.NodeName("node1-name")
|
||||
dsw.AddNode(node1Name)
|
||||
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, node1Name)
|
||||
if podAddErr != nil {
|
||||
@ -591,7 +592,7 @@ func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
|
||||
generatedVolumeName,
|
||||
node1Name)
|
||||
}
|
||||
node2Name := "node2-name"
|
||||
node2Name := k8stypes.NodeName("node2-name")
|
||||
|
||||
// Act
|
||||
dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, node2Name)
|
||||
@ -629,7 +630,7 @@ func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) {
|
||||
podName := "pod-uid"
|
||||
volume1Name := api.UniqueVolumeName("volume1-name")
|
||||
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volume1Spec, nodeName)
|
||||
if podAddErr != nil {
|
||||
@ -680,7 +681,7 @@ func Test_NodeExists_Positive_NodeExists(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
notAddedNodeName := "node-not-added-name"
|
||||
notAddedNodeName := k8stypes.NodeName("node-not-added-name")
|
||||
|
||||
// Act
|
||||
notAddedNodeExists := dsw.NodeExists(notAddedNodeName)
|
||||
@ -703,7 +704,7 @@ func Test_NodeExists_Positive_NodeDoesntExist(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
|
||||
// Act
|
||||
@ -727,7 +728,7 @@ func Test_VolumeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
podName := "pod-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
@ -757,7 +758,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
podName := "pod-uid"
|
||||
volume1Name := api.UniqueVolumeName("volume1-name")
|
||||
@ -793,7 +794,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeDoesntExists(t *testing.T)
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
|
||||
// Act
|
||||
@ -833,8 +834,8 @@ func Test_GetVolumesToAttach_Positive_TwoNodes(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
node1Name := "node1-name"
|
||||
node2Name := "node2-name"
|
||||
node1Name := k8stypes.NodeName("node1-name")
|
||||
node2Name := k8stypes.NodeName("node2-name")
|
||||
dsw.AddNode(node1Name)
|
||||
dsw.AddNode(node2Name)
|
||||
|
||||
@ -854,7 +855,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
node1Name := "node1-name"
|
||||
node1Name := k8stypes.NodeName("node1-name")
|
||||
pod1Name := "pod1-uid"
|
||||
volume1Name := api.UniqueVolumeName("volume1-name")
|
||||
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
|
||||
@ -866,7 +867,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
|
||||
pod1Name,
|
||||
podAddErr)
|
||||
}
|
||||
node2Name := "node2-name"
|
||||
node2Name := k8stypes.NodeName("node2-name")
|
||||
pod2Name := "pod2-uid"
|
||||
volume2Name := api.UniqueVolumeName("volume2-name")
|
||||
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
|
||||
@ -899,7 +900,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
node1Name := "node1-name"
|
||||
node1Name := k8stypes.NodeName("node1-name")
|
||||
pod1Name := "pod1-uid"
|
||||
volume1Name := api.UniqueVolumeName("volume1-name")
|
||||
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
|
||||
@ -911,7 +912,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
|
||||
pod1Name,
|
||||
podAddErr)
|
||||
}
|
||||
node2Name := "node2-name"
|
||||
node2Name := k8stypes.NodeName("node2-name")
|
||||
pod2Name := "pod2-uid"
|
||||
volume2Name := api.UniqueVolumeName("volume2-name")
|
||||
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
|
||||
@ -953,7 +954,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := NewDesiredStateOfWorld(volumePluginMgr)
|
||||
node1Name := "node1-name"
|
||||
node1Name := k8stypes.NodeName("node1-name")
|
||||
pod1Name := "pod1-uid"
|
||||
volume1Name := api.UniqueVolumeName("volume1-name")
|
||||
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
|
||||
@ -965,7 +966,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
|
||||
pod1Name,
|
||||
podAddErr)
|
||||
}
|
||||
node2Name := "node2-name"
|
||||
node2Name := k8stypes.NodeName("node2-name")
|
||||
pod2aName := "pod2a-name"
|
||||
volume2Name := api.UniqueVolumeName("volume2-name")
|
||||
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
|
||||
@ -1018,7 +1019,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
|
||||
func verifyVolumeToAttach(
|
||||
t *testing.T,
|
||||
volumesToAttach []VolumeToAttach,
|
||||
expectedNodeName string,
|
||||
expectedNodeName k8stypes.NodeName,
|
||||
expectedVolumeName api.UniqueVolumeName,
|
||||
expectedVolumeSpecName string) {
|
||||
for _, volumeToAttach := range volumesToAttach {
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
|
||||
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
|
||||
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
|
||||
k8stypes "k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
|
||||
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
|
||||
@ -86,7 +87,7 @@ func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
|
||||
podName := "pod-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
volumeExists := dsw.VolumeExists(volumeName, nodeName)
|
||||
if volumeExists {
|
||||
@ -132,7 +133,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
|
||||
podName := "pod-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
volumeExists := dsw.VolumeExists(volumeName, nodeName)
|
||||
if volumeExists {
|
||||
@ -199,7 +200,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
|
||||
podName := "pod-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
volumeExists := dsw.VolumeExists(volumeName, nodeName)
|
||||
if volumeExists {
|
||||
@ -266,7 +267,7 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate
|
||||
podName := "pod-uid"
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := "node-name"
|
||||
nodeName := k8stypes.NodeName("node-name")
|
||||
dsw.AddNode(nodeName)
|
||||
volumeExists := dsw.VolumeExists(volumeName, nodeName)
|
||||
if volumeExists {
|
||||
|
@ -60,7 +60,7 @@ type nodeStatusUpdater struct {
func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached()
for nodeName, attachedVolumes := range nodesToUpdate {
nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(nodeName)
nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(string(nodeName))
if nodeObj == nil || !exists || err != nil {
// If node does not exist, its status cannot be updated, log error and move on.
glog.V(5).Infof(
@ -105,7 +105,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
err)
}

_, err = nsu.kubeClient.Core().Nodes().PatchStatus(nodeName, patchBytes)
_, err = nsu.kubeClient.Core().Nodes().PatchStatus(string(nodeName), patchBytes)
if err != nil {
// If update node status fails, reset flag statusUpdateNeeded back to true
// to indicate this node status needs to be updated again

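The two conversions above show the boundary rule the status updater follows after this change: node names stay strongly typed internally (including as map keys), and string(nodeName) appears only where an API that still takes a plain string, such as the informer store key or PatchStatus, is called. A hedged sketch of the same idea against a hypothetical string-keyed store:

    package main

    import "fmt"

    type NodeName string

    // store is a hypothetical stand-in for a string-keyed cache such as the node informer's store.
    type store struct{ byKey map[string]string }

    func (s *store) GetByKey(key string) (string, bool) { v, ok := s.byKey[key]; return v, ok }

    func main() {
        volumesToReport := map[NodeName][]string{"node-1": {"vol-a"}} // typed keys internally
        s := &store{byKey: map[string]string{"node-1": "node object"}}

        for nodeName, vols := range volumesToReport {
            obj, exists := s.GetByKey(string(nodeName)) // convert only at the string-keyed boundary
            if !exists {
                continue
            }
            fmt.Println(nodeName, vols, obj)
        }
    }
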
@ -7735,7 +7735,7 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
|
||||
},
|
||||
"host": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Host name on which the event is generated.",
|
||||
Description: "Node name on which the event is generated.",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
@ -424,11 +424,15 @@ func DefaultAndValidateRunOptions(options *options.ServerRunOptions) {
|
||||
if !supported {
|
||||
glog.Fatalf("GCE cloud provider has no instances. this shouldn't happen. exiting.")
|
||||
}
|
||||
name, err := os.Hostname()
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to get hostname: %v", err)
|
||||
}
|
||||
addrs, err := instances.NodeAddresses(name)
|
||||
nodeName, err := instances.CurrentNodeName(hostname)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to get NodeName: %v", err)
|
||||
}
|
||||
addrs, err := instances.NodeAddresses(nodeName)
|
||||
if err != nil {
|
||||
glog.Warningf("Unable to obtain external host address from cloud provider: %v", err)
|
||||
} else {
|
@ -23,11 +23,12 @@ import (
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
||||
// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
|
||||
func NewSourceApiserver(c *clientset.Clientset, nodeName string, updates chan<- interface{}) {
|
||||
lw := cache.NewListWatchFromClient(c.CoreClient, "pods", api.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, nodeName))
|
||||
func NewSourceApiserver(c *clientset.Clientset, nodeName types.NodeName, updates chan<- interface{}) {
|
||||
lw := cache.NewListWatchFromClient(c.CoreClient, "pods", api.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName)))
|
||||
newSourceApiserverFromLW(lw, updates)
|
||||
}
|
||||
|
@ -35,11 +35,11 @@ import (
|
||||
)
|
||||
|
||||
// Generate a pod name that is unique among nodes by appending the nodeName.
|
||||
func generatePodName(name, nodeName string) string {
|
||||
func generatePodName(name string, nodeName types.NodeName) string {
|
||||
return fmt.Sprintf("%s-%s", name, nodeName)
|
||||
}
|
||||
|
||||
func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) error {
|
||||
func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName types.NodeName) error {
|
||||
if len(pod.UID) == 0 {
|
||||
hasher := md5.New()
|
||||
if isFile {
|
||||
@ -62,7 +62,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) er
|
||||
glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source)
|
||||
|
||||
// Set the Host field to indicate this pod is scheduled on the current node.
|
||||
pod.Spec.NodeName = nodeName
|
||||
pod.Spec.NodeName = string(nodeName)
|
||||
|
||||
pod.ObjectMeta.SelfLink = getSelfLink(pod.Name, pod.Namespace)
|
||||
|
@ -30,15 +30,16 @@ import (
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
||||
type sourceFile struct {
|
||||
path string
|
||||
nodeName string
|
||||
nodeName types.NodeName
|
||||
updates chan<- interface{}
|
||||
}
|
||||
|
||||
func NewSourceFile(path string, nodeName string, period time.Duration, updates chan<- interface{}) {
|
||||
func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) {
|
||||
config := &sourceFile{
|
||||
path: path,
|
||||
nodeName: nodeName,
|
@ -29,6 +29,7 @@ import (
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/securitycontext"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
utiltesting "k8s.io/kubernetes/pkg/util/testing"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
)
|
||||
@ -71,7 +72,7 @@ func writeTestFile(t *testing.T, dir, name string, contents string) *os.File {
|
||||
}
|
||||
|
||||
func TestReadPodsFromFile(t *testing.T) {
|
||||
hostname := "random-test-hostname"
|
||||
nodeName := "random-test-hostname"
|
||||
grace := int64(30)
|
||||
var testCases = []struct {
|
||||
desc string
|
||||
@ -100,14 +101,14 @@ func TestReadPodsFromFile(t *testing.T) {
|
||||
},
|
||||
expected: CreatePodUpdate(kubetypes.SET, kubetypes.FileSource, &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "test-" + hostname,
|
||||
Name: "test-" + nodeName,
|
||||
UID: "12345",
|
||||
Namespace: "mynamespace",
|
||||
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "12345"},
|
||||
SelfLink: getSelfLink("test-"+hostname, "mynamespace"),
|
||||
SelfLink: getSelfLink("test-"+nodeName, "mynamespace"),
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: hostname,
|
||||
NodeName: nodeName,
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
TerminationGracePeriodSeconds: &grace,
|
||||
@ -142,7 +143,7 @@ func TestReadPodsFromFile(t *testing.T) {
|
||||
defer os.Remove(file.Name())
|
||||
|
||||
ch := make(chan interface{})
|
||||
NewSourceFile(file.Name(), hostname, time.Millisecond, ch)
|
||||
NewSourceFile(file.Name(), types.NodeName(nodeName), time.Millisecond, ch)
|
||||
select {
|
||||
case got := <-ch:
|
||||
update := got.(kubetypes.PodUpdate)
|
@ -29,19 +29,20 @@ import (
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
||||
type sourceURL struct {
|
||||
url string
|
||||
header http.Header
|
||||
nodeName string
|
||||
nodeName types.NodeName
|
||||
updates chan<- interface{}
|
||||
data []byte
|
||||
failureLogs int
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func NewSourceURL(url string, header http.Header, nodeName string, period time.Duration, updates chan<- interface{}) {
|
||||
func NewSourceURL(url string, header http.Header, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) {
|
||||
config := &sourceURL{
|
||||
url: url,
|
||||
header: header,
|
@ -29,6 +29,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api/validation"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
utiltesting "k8s.io/kubernetes/pkg/util/testing"
|
||||
)
|
||||
|
||||
@ -121,7 +122,7 @@ func TestExtractInvalidPods(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExtractPodsFromHTTP(t *testing.T) {
|
||||
hostname := "different-value"
|
||||
nodeName := "different-value"
|
||||
|
||||
grace := int64(30)
|
||||
var testCases = []struct {
|
||||
@ -142,7 +143,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
||||
Namespace: "mynamespace",
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: hostname,
|
||||
NodeName: string(nodeName),
|
||||
Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}},
|
||||
SecurityContext: &api.PodSecurityContext{},
|
||||
},
|
||||
@ -155,13 +156,13 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
||||
&api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "111",
|
||||
Name: "foo" + "-" + hostname,
|
||||
Name: "foo" + "-" + nodeName,
|
||||
Namespace: "mynamespace",
|
||||
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"},
|
||||
SelfLink: getSelfLink("foo-"+hostname, "mynamespace"),
|
||||
SelfLink: getSelfLink("foo-"+nodeName, "mynamespace"),
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: hostname,
|
||||
NodeName: nodeName,
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
SecurityContext: &api.PodSecurityContext{},
|
||||
@ -193,7 +194,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
||||
UID: "111",
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: hostname,
|
||||
NodeName: nodeName,
|
||||
Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}},
|
||||
SecurityContext: &api.PodSecurityContext{},
|
||||
},
|
||||
@ -207,7 +208,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
||||
UID: "222",
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: hostname,
|
||||
NodeName: nodeName,
|
||||
Containers: []api.Container{{Name: "2", Image: "bar:bartag", ImagePullPolicy: ""}},
|
||||
SecurityContext: &api.PodSecurityContext{},
|
||||
},
|
||||
@ -222,13 +223,13 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
||||
&api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "111",
|
||||
Name: "foo" + "-" + hostname,
|
||||
Name: "foo" + "-" + nodeName,
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"},
|
||||
SelfLink: getSelfLink("foo-"+hostname, kubetypes.NamespaceDefault),
|
||||
SelfLink: getSelfLink("foo-"+nodeName, kubetypes.NamespaceDefault),
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: hostname,
|
||||
NodeName: nodeName,
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
TerminationGracePeriodSeconds: &grace,
|
||||
@ -248,13 +249,13 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
||||
&api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "222",
|
||||
Name: "bar" + "-" + hostname,
|
||||
Name: "bar" + "-" + nodeName,
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "222"},
|
||||
SelfLink: getSelfLink("bar-"+hostname, kubetypes.NamespaceDefault),
|
||||
SelfLink: getSelfLink("bar-"+nodeName, kubetypes.NamespaceDefault),
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: hostname,
|
||||
NodeName: nodeName,
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
TerminationGracePeriodSeconds: &grace,
|
||||
@ -291,7 +292,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
||||
testServer := httptest.NewServer(&fakeHandler)
|
||||
defer testServer.Close()
|
||||
ch := make(chan interface{}, 1)
|
||||
c := sourceURL{testServer.URL, http.Header{}, hostname, ch, nil, 0, http.DefaultClient}
|
||||
c := sourceURL{testServer.URL, http.Header{}, types.NodeName(nodeName), ch, nil, 0, http.DefaultClient}
|
||||
if err := c.extractFromURL(); err != nil {
|
||||
t.Errorf("%s: Unexpected error: %v", testCase.desc, err)
|
||||
continue
|
@ -222,7 +222,7 @@ type KubeletDeps struct {
|
||||
|
||||
// makePodSourceConfig creates a config.PodConfig from the given
|
||||
// KubeletConfiguration or returns an error.
|
||||
func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, nodeName string) (*config.PodConfig, error) {
|
||||
func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, nodeName types.NodeName) (*config.PodConfig, error) {
|
||||
manifestURLHeader := make(http.Header)
|
||||
if kubeCfg.ManifestURLHeader != "" {
|
||||
pieces := strings.Split(kubeCfg.ManifestURLHeader, ":")
|
||||
@ -277,7 +277,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
|
||||
|
||||
hostname := nodeutil.GetHostname(kubeCfg.HostnameOverride)
|
||||
// Query the cloud provider for our node name, default to hostname
|
||||
nodeName := hostname
|
||||
nodeName := types.NodeName(hostname)
|
||||
if kubeDeps.Cloud != nil {
|
||||
var err error
|
||||
instances, ok := kubeDeps.Cloud.Instances()
|
||||
@ -377,7 +377,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
|
||||
if kubeClient != nil {
|
||||
// TODO: cache.NewListWatchFromClient is limited as it takes a client implementation rather
|
||||
// than an interface. There is no way to construct a list+watcher using resource name.
|
||||
fieldSelector := fields.Set{api.ObjectNameField: nodeName}.AsSelector()
|
||||
fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector()
|
||||
listWatch := &cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = fieldSelector
|
||||
@ -398,7 +398,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
|
||||
// TODO: what is namespace for node?
|
||||
nodeRef := &api.ObjectReference{
|
||||
Kind: "Node",
|
||||
Name: nodeName,
|
||||
Name: string(nodeName),
|
||||
UID: types.UID(nodeName),
|
||||
Namespace: "",
|
||||
}
|
||||
@ -783,7 +783,7 @@ type Kubelet struct {
|
||||
kubeletConfiguration componentconfig.KubeletConfiguration
|
||||
|
||||
hostname string
|
||||
nodeName string
|
||||
nodeName types.NodeName
|
||||
dockerClient dockertools.DockerInterface
|
||||
runtimeCache kubecontainer.RuntimeCache
|
||||
kubeClient clientset.Interface
|
@ -191,7 +191,7 @@ func (kl *Kubelet) GetNode() (*api.Node, error) {
|
||||
if kl.standaloneMode {
|
||||
return kl.initialNode()
|
||||
}
|
||||
return kl.nodeInfo.GetNodeInfo(kl.nodeName)
|
||||
return kl.nodeInfo.GetNodeInfo(string(kl.nodeName))
|
||||
}
|
||||
|
||||
// getNodeAnyWay() must return a *api.Node which is required by RunGeneralPredicates().
|
||||
@ -201,7 +201,7 @@ func (kl *Kubelet) GetNode() (*api.Node, error) {
|
||||
// zero capacity, and the default labels.
|
||||
func (kl *Kubelet) getNodeAnyWay() (*api.Node, error) {
|
||||
if !kl.standaloneMode {
|
||||
if n, err := kl.nodeInfo.GetNodeInfo(kl.nodeName); err == nil {
|
||||
if n, err := kl.nodeInfo.GetNodeInfo(string(kl.nodeName)); err == nil {
|
||||
return n, nil
|
||||
}
|
||||
}
|
@ -98,7 +98,7 @@ func (kl *Kubelet) tryRegisterWithApiServer(node *api.Node) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
existingNode, err := kl.kubeClient.Core().Nodes().Get(kl.nodeName)
|
||||
existingNode, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName))
|
||||
if err != nil {
|
||||
glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
|
||||
return false
|
||||
@ -173,7 +173,7 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *a
|
||||
func (kl *Kubelet) initialNode() (*api.Node, error) {
|
||||
node := &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: kl.nodeName,
|
||||
Name: string(kl.nodeName),
|
||||
Labels: map[string]string{
|
||||
unversioned.LabelHostname: kl.hostname,
|
||||
unversioned.LabelOS: goRuntime.GOOS,
|
||||
@ -309,7 +309,7 @@ func (kl *Kubelet) updateNodeStatus() error {
|
||||
// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
|
||||
// is set, this function will also confirm that cbr0 is configured correctly.
|
||||
func (kl *Kubelet) tryUpdateNodeStatus() error {
|
||||
node, err := kl.kubeClient.Core().Nodes().Get(kl.nodeName)
|
||||
node, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
|
||||
}
|
@ -128,7 +128,7 @@ func newTestKubeletWithImageList(
|
||||
kubelet.os = &containertest.FakeOS{}
|
||||
|
||||
kubelet.hostname = testKubeletHostname
|
||||
kubelet.nodeName = testKubeletHostname
|
||||
kubelet.nodeName = types.NodeName(testKubeletHostname)
|
||||
kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
|
||||
kubelet.runtimeState.setNetworkState(nil)
|
||||
kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kubelet.nonMasqueradeCIDR, 1440)
|
||||
@ -211,7 +211,7 @@ func newTestKubeletWithImageList(
|
||||
kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime)
|
||||
nodeRef := &api.ObjectReference{
|
||||
Kind: "Node",
|
||||
Name: kubelet.nodeName,
|
||||
Name: string(kubelet.nodeName),
|
||||
UID: types.UID(kubelet.nodeName),
|
||||
Namespace: "",
|
||||
}
|
||||
@ -232,7 +232,7 @@ func newTestKubeletWithImageList(
|
||||
kubelet.mounter = &mount.FakeMounter{}
|
||||
kubelet.volumeManager, err = kubeletvolume.NewVolumeManager(
|
||||
controllerAttachDetachEnabled,
|
||||
kubelet.hostname,
|
||||
kubelet.nodeName,
|
||||
kubelet.podManager,
|
||||
fakeKubeClient,
|
||||
kubelet.volumePluginMgr,
|
||||
@ -402,7 +402,7 @@ func TestHandlePortConflicts(t *testing.T) {
|
||||
|
||||
kl.nodeLister = testNodeLister{nodes: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
|
||||
ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)},
|
||||
Status: api.NodeStatus{
|
||||
Allocatable: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
@ -412,7 +412,7 @@ func TestHandlePortConflicts(t *testing.T) {
|
||||
}}
|
||||
kl.nodeInfo = testNodeInfo{nodes: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
|
||||
ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)},
|
||||
Status: api.NodeStatus{
|
||||
Allocatable: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
@ -421,7 +421,7 @@ func TestHandlePortConflicts(t *testing.T) {
|
||||
},
|
||||
}}
|
||||
|
||||
spec := api.PodSpec{NodeName: kl.nodeName, Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
|
||||
spec := api.PodSpec{NodeName: string(kl.nodeName), Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
|
||||
pods := []*api.Pod{
|
||||
podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
|
||||
podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
|
||||
@ -555,7 +555,7 @@ func TestHandleMemExceeded(t *testing.T) {
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
||||
spec := api.PodSpec{NodeName: kl.nodeName,
|
||||
spec := api.PodSpec{NodeName: string(kl.nodeName),
|
||||
Containers: []api.Container{{Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
"memory": resource.MustParse("90"),
|
||||
@ -1781,7 +1781,7 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
|
||||
kl := testKubelet.kubelet
|
||||
kl.nodeLister = testNodeLister{nodes: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
|
||||
ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)},
|
||||
Status: api.NodeStatus{
|
||||
Allocatable: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
@ -1791,7 +1791,7 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
|
||||
}}
|
||||
kl.nodeInfo = testNodeInfo{nodes: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
|
||||
ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)},
|
||||
Status: api.NodeStatus{
|
||||
Allocatable: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
@ -94,7 +94,7 @@ func TestRunOnce(t *testing.T) {
|
||||
}
|
||||
kb.volumeManager, err = volumemanager.NewVolumeManager(
|
||||
true,
|
||||
kb.hostname,
|
||||
kb.nodeName,
|
||||
kb.podManager,
|
||||
kb.kubeClient,
|
||||
kb.volumePluginMgr,
|
||||
@ -109,7 +109,7 @@ func TestRunOnce(t *testing.T) {
|
||||
kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime)
|
||||
nodeRef := &api.ObjectReference{
|
||||
Kind: "Node",
|
||||
Name: kb.nodeName,
|
||||
Name: string(kb.nodeName),
|
||||
UID: types.UID(kb.nodeName),
|
||||
Namespace: "",
|
||||
}
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/apis/certificates"
|
||||
unversionedcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/unversioned"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
certutil "k8s.io/kubernetes/pkg/util/cert"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
)
|
||||
@ -33,7 +34,7 @@ import (
|
||||
// then it will watch the object's status; once approved by the API server, it will return the API
// server's issued certificate (pem-encoded). If there are any errors, or the watch times out,
// it will return an error. This is intended for use on nodes (kubelet and kubeadm).
|
||||
func RequestNodeCertificate(client unversionedcertificates.CertificateSigningRequestInterface, privateKeyData []byte, nodeName string) (certData []byte, err error) {
|
||||
func RequestNodeCertificate(client unversionedcertificates.CertificateSigningRequestInterface, privateKeyData []byte, nodeName types.NodeName) (certData []byte, err error) {
|
||||
subject := &pkix.Name{
|
||||
Organization: []string{"system:nodes"},
|
||||
CommonName: fmt.Sprintf("system:node:%s", nodeName),
|
||||
|
@ -160,7 +160,7 @@ type AttachedVolume struct {
|
||||
|
||||
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
|
||||
func NewActualStateOfWorld(
|
||||
nodeName string,
|
||||
nodeName types.NodeName,
|
||||
volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
|
||||
return &actualStateOfWorld{
|
||||
nodeName: nodeName,
|
||||
@ -185,7 +185,7 @@ func IsRemountRequiredError(err error) bool {
|
||||
|
||||
type actualStateOfWorld struct {
|
||||
// nodeName is the name of this node. This value is passed to Attach/Detach
|
||||
nodeName string
|
||||
nodeName types.NodeName
|
||||
// attachedVolumes is a map containing the set of volumes the kubelet volume
|
||||
// manager believes to be successfully attached to this node. Volume types
|
||||
// that do not implement an attacher interface are assumed to be in this
|
||||
@ -271,12 +271,12 @@ type mountedPod struct {
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) MarkVolumeAsAttached(
|
||||
volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, _, devicePath string) error {
|
||||
volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error {
|
||||
return asw.addVolume(volumeName, volumeSpec, devicePath)
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) MarkVolumeAsDetached(
|
||||
volumeName api.UniqueVolumeName, nodeName string) {
|
||||
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
|
||||
asw.DeleteVolume(volumeName)
|
||||
}
|
||||
|
||||
@ -296,11 +296,11 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted(
|
||||
volumeGidValue)
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName api.UniqueVolumeName, nodeName string) {
|
||||
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName) {
|
||||
// no operation for kubelet side
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(volumeName api.UniqueVolumeName, nodeName string) error {
|
||||
func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName) error {
|
||||
// no operation for kubelet side
|
||||
return nil
|
||||
}
|
||||
|
@ -71,7 +71,7 @@ type Reconciler interface {
|
||||
// successive executions
|
||||
// waitForAttachTimeout - the amount of time the Mount function will wait for
|
||||
// the volume to be attached
|
||||
// hostName - the hostname for this node, used by Attach and Detach methods
|
||||
// nodeName - the Name for this node, used by Attach and Detach methods
|
||||
// desiredStateOfWorld - cache containing the desired state of the world
|
||||
// actualStateOfWorld - cache containing the actual state of the world
|
||||
// operationExecutor - used to trigger attach/detach/mount/unmount operations
|
||||
@ -85,7 +85,7 @@ func NewReconciler(
|
||||
loopSleepDuration time.Duration,
|
||||
reconstructDuration time.Duration,
|
||||
waitForAttachTimeout time.Duration,
|
||||
hostName string,
|
||||
nodeName types.NodeName,
|
||||
desiredStateOfWorld cache.DesiredStateOfWorld,
|
||||
actualStateOfWorld cache.ActualStateOfWorld,
|
||||
operationExecutor operationexecutor.OperationExecutor,
|
||||
@ -98,7 +98,7 @@ func NewReconciler(
|
||||
loopSleepDuration: loopSleepDuration,
|
||||
reconstructDuration: reconstructDuration,
|
||||
waitForAttachTimeout: waitForAttachTimeout,
|
||||
hostName: hostName,
|
||||
nodeName: nodeName,
|
||||
desiredStateOfWorld: desiredStateOfWorld,
|
||||
actualStateOfWorld: actualStateOfWorld,
|
||||
operationExecutor: operationExecutor,
|
||||
@ -115,7 +115,7 @@ type reconciler struct {
|
||||
loopSleepDuration time.Duration
|
||||
reconstructDuration time.Duration
|
||||
waitForAttachTimeout time.Duration
|
||||
hostName string
|
||||
nodeName types.NodeName
|
||||
desiredStateOfWorld cache.DesiredStateOfWorld
|
||||
actualStateOfWorld cache.ActualStateOfWorld
|
||||
operationExecutor operationexecutor.OperationExecutor
|
||||
@ -201,7 +201,7 @@ func (rc *reconciler) reconcile() {
|
||||
volumeToMount.Pod.UID)
|
||||
err := rc.operationExecutor.VerifyControllerAttachedVolume(
|
||||
volumeToMount.VolumeToMount,
|
||||
rc.hostName,
|
||||
rc.nodeName,
|
||||
rc.actualStateOfWorld)
|
||||
if err != nil &&
|
||||
!nestedpendingoperations.IsAlreadyExists(err) &&
|
||||
@ -230,7 +230,7 @@ func (rc *reconciler) reconcile() {
|
||||
volumeToAttach := operationexecutor.VolumeToAttach{
|
||||
VolumeName: volumeToMount.VolumeName,
|
||||
VolumeSpec: volumeToMount.VolumeSpec,
|
||||
NodeName: rc.hostName,
|
||||
NodeName: rc.nodeName,
|
||||
}
|
||||
glog.V(12).Infof("Attempting to start AttachVolume for volume %q (spec.Name: %q) pod %q (UID: %q)",
|
||||
volumeToMount.VolumeName,
|
||||
@ -334,7 +334,7 @@ func (rc *reconciler) reconcile() {
|
||||
// Kubelet not responsible for detaching or this volume has a non-attachable volume plugin,
|
||||
// so just remove it from actualStateOfWorld without issuing a detach.
rc.actualStateOfWorld.MarkVolumeAsDetached(
|
||||
attachedVolume.VolumeName, rc.hostName)
|
||||
attachedVolume.VolumeName, rc.nodeName)
|
||||
} else {
|
||||
// Only detach if kubelet detach is enabled
|
||||
glog.V(12).Infof("Attempting to start DetachVolume for volume %q (spec.Name: %q)",
|
||||
|
@ -29,6 +29,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/kubelet/config"
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
k8stypes "k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
@ -45,9 +46,9 @@ const (
|
||||
reconcilerReconstructSleepPeriod time.Duration = 10 * time.Minute
|
||||
// waitForAttachTimeout is the maximum amount of time an
// operationexecutor.Mount call will wait for a volume to be attached.
|
||||
waitForAttachTimeout time.Duration = 1 * time.Second
|
||||
nodeName string = "myhostname"
|
||||
kubeletPodsDir string = "fake-dir"
|
||||
waitForAttachTimeout time.Duration = 1 * time.Second
|
||||
nodeName k8stypes.NodeName = k8stypes.NodeName("mynodename")
|
||||
kubeletPodsDir string = "fake-dir"
|
||||
)
|
||||
|
||||
// Calls Run()
|
||||
@ -452,7 +453,7 @@ func createTestClient() *fake.Clientset {
|
||||
fakeClient.AddReactor("get", "nodes",
|
||||
func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{Name: nodeName},
|
||||
ObjectMeta: api.ObjectMeta{Name: string(nodeName)},
|
||||
Status: api.NodeStatus{
|
||||
VolumesAttached: []api.AttachedVolume{
|
||||
{
|
||||
@ -460,7 +461,7 @@ func createTestClient() *fake.Clientset {
|
||||
DevicePath: "fake/path",
|
||||
},
|
||||
}},
|
||||
Spec: api.NodeSpec{ExternalID: nodeName},
|
||||
Spec: api.NodeSpec{ExternalID: string(nodeName)},
|
||||
}, nil
|
||||
})
|
||||
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
|
||||
|
@ -33,6 +33,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager/populator"
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler"
|
||||
k8stypes "k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
@ -143,7 +144,7 @@ type VolumeManager interface {
|
||||
// Must be pre-initialized.
|
||||
func NewVolumeManager(
|
||||
controllerAttachDetachEnabled bool,
|
||||
hostName string,
|
||||
nodeName k8stypes.NodeName,
|
||||
podManager pod.Manager,
|
||||
kubeClient internalclientset.Interface,
|
||||
volumePluginMgr *volume.VolumePluginMgr,
|
||||
@ -156,7 +157,7 @@ func NewVolumeManager(
|
||||
kubeClient: kubeClient,
|
||||
volumePluginMgr: volumePluginMgr,
|
||||
desiredStateOfWorld: cache.NewDesiredStateOfWorld(volumePluginMgr),
|
||||
actualStateOfWorld: cache.NewActualStateOfWorld(hostName, volumePluginMgr),
|
||||
actualStateOfWorld: cache.NewActualStateOfWorld(nodeName, volumePluginMgr),
|
||||
operationExecutor: operationexecutor.NewOperationExecutor(
|
||||
kubeClient,
|
||||
volumePluginMgr,
|
||||
@ -169,7 +170,7 @@ func NewVolumeManager(
|
||||
reconcilerLoopSleepPeriod,
|
||||
reconcilerReconstructSleepPeriod,
|
||||
waitForAttachTimeout,
|
||||
hostName,
|
||||
nodeName,
|
||||
vm.desiredStateOfWorld,
|
||||
vm.actualStateOfWorld,
|
||||
vm.operationExecutor,
|
||||
|
43
pkg/types/nodename.go
Normal file
@ -0,0 +1,43 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
// NodeName is a type that holds an api.Node's Name identifier.
// Being a type captures intent and helps make sure that the node name
|
||||
// is not confused with similar concepts (the hostname, the cloud provider id,
|
||||
// the cloud provider name, etc.).
//
|
||||
// To clarify the various types:
|
||||
//
|
||||
// * Node.Name is the Name field of the Node in the API. This should be stored in a NodeName.
|
||||
// Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level.
|
||||
//
|
||||
// * Hostname is the hostname of the local machine (from uname -n).
|
||||
// However, some components allow the user to pass in a --hostname-override flag,
|
||||
// which will override this in most places. In the absence of anything more meaningful,
|
||||
// kubelet will use Hostname as the Node.Name when it creates the Node.
|
||||
//
|
||||
// * The cloudproviders have their own names: GCE has InstanceName, AWS has InstanceId.
//
|
||||
// For GCE, InstanceName is the Name of an Instance object in the GCE API. On GCE, Instance.Name becomes the
|
||||
// Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up
|
||||
// to the cloudprovider how to do this mapping.
|
||||
//
|
||||
// For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the
|
||||
// PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if
|
||||
// we are using a custom DHCP domain it won't be.
|
||||
type NodeName string
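To make the intent of the new type concrete, here is a minimal, hypothetical sketch (not part of this commit) of the usage pattern the rest of this change follows: convert the hostname into a NodeName exactly once, pass the NodeName everywhere, and convert back to string only at string-typed API boundaries. The function and variable names below are illustrative only.

// Illustrative sketch; assumes the 1.4-era client and util packages used elsewhere in this change.
package example

import (
	"fmt"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/types"
	nodeutil "k8s.io/kubernetes/pkg/util/node"
)

// resolveNodeName converts the local hostname into a strongly-typed NodeName
// exactly once; downstream code passes the NodeName, never the raw hostname,
// so accidental hostname/NodeName mix-ups become compile errors.
func resolveNodeName(hostnameOverride string) types.NodeName {
	hostname := nodeutil.GetHostname(hostnameOverride)
	return types.NodeName(hostname)
}

// getNode shows the explicit conversion back to string at an API boundary.
func getNode(c clientset.Interface, nodeName types.NodeName) error {
	node, err := c.Core().Nodes().Get(string(nodeName))
	if err != nil {
		return fmt.Errorf("error getting node %q: %v", nodeName, err)
	}
	_ = node
	return nil
}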
|
@ -28,6 +28,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
||||
func GetHostname(hostnameOverride string) string {
|
||||
@ -86,7 +87,7 @@ func GetZoneKey(node *api.Node) string {
|
||||
}
|
||||
|
||||
// SetNodeCondition updates the specific node condition with a patch operation.
func SetNodeCondition(c clientset.Interface, node string, condition api.NodeCondition) error {
|
||||
func SetNodeCondition(c clientset.Interface, node types.NodeName, condition api.NodeCondition) error {
|
||||
generatePatch := func(condition api.NodeCondition) ([]byte, error) {
|
||||
raw, err := json.Marshal(&[]api.NodeCondition{condition})
|
||||
if err != nil {
|
||||
@ -99,6 +100,6 @@ func SetNodeCondition(c clientset.Interface, node string, condition api.NodeCond
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
_, err = c.Core().Nodes().PatchStatus(node, patch)
|
||||
_, err = c.Core().Nodes().PatchStatus(string(node), patch)
|
||||
return err
|
||||
}
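For context, a hedged usage sketch of the new SetNodeCondition signature; the caller and condition values below are invented for illustration and are not part of this commit.

// Hypothetical caller; assumes the imports used elsewhere in this change
// (api, types, glog, clientset) and this package imported as nodeutil.
func markReady(client clientset.Interface, name types.NodeName) {
	cond := api.NodeCondition{
		Type:   api.NodeReady,
		Status: api.ConditionTrue,
		Reason: "KubeletReady", // invented value, for illustration only
	}
	if err := nodeutil.SetNodeCondition(client, name, cond); err != nil {
		glog.Errorf("failed to patch condition onto node %q: %v", name, err)
	}
}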
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/exec"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
@ -57,7 +58,7 @@ func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath str
|
||||
return mount.GetMountRefs(mounter, deviceMountPath)
|
||||
}
|
||||
|
||||
func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, hostName string) (string, error) {
|
||||
func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
|
||||
volumeSource, readOnly, err := getVolumeSource(spec)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -67,7 +68,7 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, hostName
|
||||
|
||||
// awsCloud.AttachDisk checks if disk is already attached to node and
|
||||
// succeeds in that case, so no need to do that separately.
|
||||
devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, hostName, readOnly)
|
||||
devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName, readOnly)
|
||||
if err != nil {
|
||||
glog.Errorf("Error attaching volume %q: %+v", volumeID, err)
|
||||
return "", err
|
||||
@ -185,24 +186,24 @@ func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error)
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (detacher *awsElasticBlockStoreDetacher) Detach(deviceMountPath string, hostName string) error {
|
||||
func (detacher *awsElasticBlockStoreDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
|
||||
volumeID := path.Base(deviceMountPath)
|
||||
|
||||
attached, err := detacher.awsVolumes.DiskIsAttached(volumeID, hostName)
|
||||
attached, err := detacher.awsVolumes.DiskIsAttached(volumeID, nodeName)
|
||||
if err != nil {
|
||||
// Log error and continue with detach
|
||||
glog.Errorf(
|
||||
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
|
||||
volumeID, hostName, err)
|
||||
volumeID, nodeName, err)
|
||||
}
|
||||
|
||||
if err == nil && !attached {
|
||||
// Volume is already detached from node.
|
||||
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, hostName)
|
||||
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err = detacher.awsVolumes.DetachDisk(volumeID, hostName); err != nil {
|
||||
if _, err = detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil {
|
||||
glog.Errorf("Error detaching volumeID %q: %v", volumeID, err)
|
||||
return err
|
||||
}
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
||||
func TestGetDeviceName_Volume(t *testing.T) {
|
||||
@ -74,7 +75,7 @@ type testcase struct {
|
||||
|
||||
func TestAttachDetach(t *testing.T) {
|
||||
diskName := "disk"
|
||||
instanceID := "instance"
|
||||
nodeName := types.NodeName("instance")
|
||||
readOnly := false
|
||||
spec := createVolSpec(diskName, readOnly)
|
||||
attachError := errors.New("Fake attach error")
|
||||
@ -84,10 +85,10 @@ func TestAttachDetach(t *testing.T) {
|
||||
// Successful Attach call
|
||||
{
|
||||
name: "Attach_Positive",
|
||||
attach: attachCall{diskName, instanceID, readOnly, "/dev/sda", nil},
|
||||
attach: attachCall{diskName, nodeName, readOnly, "/dev/sda", nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, instanceID)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedDevice: "/dev/sda",
|
||||
},
|
||||
@ -95,10 +96,10 @@ func TestAttachDetach(t *testing.T) {
|
||||
// Attach call fails
|
||||
{
|
||||
name: "Attach_Negative",
|
||||
attach: attachCall{diskName, instanceID, readOnly, "", attachError},
|
||||
attach: attachCall{diskName, nodeName, readOnly, "", attachError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, instanceID)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedError: attachError,
|
||||
},
|
||||
@ -106,43 +107,43 @@ func TestAttachDetach(t *testing.T) {
|
||||
// Detach succeeds
|
||||
{
|
||||
name: "Detach_Positive",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, true, nil},
|
||||
detach: detachCall{diskName, instanceID, "/dev/sda", nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
|
||||
detach: detachCall{diskName, nodeName, "/dev/sda", nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, instanceID)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Disk is already detached
|
||||
{
|
||||
name: "Detach_Positive_AlreadyDetached",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, instanceID)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Detach succeeds when DiskIsAttached fails
|
||||
{
|
||||
name: "Detach_Positive_CheckFails",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
|
||||
detach: detachCall{diskName, instanceID, "/dev/sda", nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
|
||||
detach: detachCall{diskName, nodeName, "/dev/sda", nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, instanceID)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Detach fails
|
||||
{
|
||||
name: "Detach_Negative",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
|
||||
detach: detachCall{diskName, instanceID, "", detachError},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
|
||||
detach: detachCall{diskName, nodeName, "", detachError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, instanceID)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
expectedError: detachError,
|
||||
},
|
||||
@ -216,7 +217,7 @@ func createPVSpec(name string, readOnly bool) *volume.Spec {
|
||||
|
||||
type attachCall struct {
|
||||
diskName string
|
||||
instanceID string
|
||||
nodeName types.NodeName
|
||||
readOnly bool
|
||||
retDeviceName string
|
||||
ret error
|
||||
@ -224,21 +225,22 @@ type attachCall struct {
|
||||
|
||||
type detachCall struct {
|
||||
diskName string
|
||||
instanceID string
|
||||
nodeName types.NodeName
|
||||
retDeviceName string
|
||||
ret error
|
||||
}
|
||||
|
||||
type diskIsAttachedCall struct {
|
||||
diskName, instanceID string
|
||||
isAttached bool
|
||||
ret error
|
||||
diskName string
|
||||
nodeName types.NodeName
|
||||
isAttached bool
|
||||
ret error
|
||||
}
|
||||
|
||||
func (testcase *testcase) AttachDisk(diskName string, instanceID string, readOnly bool) (string, error) {
|
||||
func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) (string, error) {
|
||||
expected := &testcase.attach
|
||||
|
||||
if expected.diskName == "" && expected.instanceID == "" {
|
||||
if expected.diskName == "" && expected.nodeName == "" {
|
||||
// testcase.attach looks uninitialized, test did not expect to call
|
||||
// AttachDisk
|
||||
testcase.t.Errorf("Unexpected AttachDisk call!")
|
||||
@ -250,9 +252,9 @@ func (testcase *testcase) AttachDisk(diskName string, instanceID string, readOnl
|
||||
return "", errors.New("Unexpected AttachDisk call: wrong diskName")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return "", errors.New("Unexpected AttachDisk call: wrong instanceID")
|
||||
if expected.nodeName != nodeName {
|
||||
testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
|
||||
return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
|
||||
}
|
||||
|
||||
if expected.readOnly != readOnly {
|
||||
@ -260,15 +262,15 @@ func (testcase *testcase) AttachDisk(diskName string, instanceID string, readOnl
|
||||
return "", errors.New("Unexpected AttachDisk call: wrong readOnly")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, instanceID, readOnly, expected.retDeviceName, expected.ret)
|
||||
glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, nodeName, readOnly, expected.retDeviceName, expected.ret)
|
||||
|
||||
return expected.retDeviceName, expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) DetachDisk(diskName string, instanceID string) (string, error) {
|
||||
func (testcase *testcase) DetachDisk(diskName string, nodeName types.NodeName) (string, error) {
|
||||
expected := &testcase.detach
|
||||
|
||||
if expected.diskName == "" && expected.instanceID == "" {
|
||||
if expected.diskName == "" && expected.nodeName == "" {
|
||||
// testcase.detach looks uninitialized, test did not expect to call
|
||||
// DetachDisk
|
||||
testcase.t.Errorf("Unexpected DetachDisk call!")
|
||||
@ -280,20 +282,20 @@ func (testcase *testcase) DetachDisk(diskName string, instanceID string) (string
|
||||
return "", errors.New("Unexpected DetachDisk call: wrong diskName")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return "", errors.New("Unexpected DetachDisk call: wrong instanceID")
|
||||
if expected.nodeName != nodeName {
|
||||
testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
|
||||
return "", errors.New("Unexpected DetachDisk call: wrong nodeName")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, instanceID, expected.retDeviceName, expected.ret)
|
||||
glog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
|
||||
|
||||
return expected.retDeviceName, expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, error) {
|
||||
func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
|
||||
expected := &testcase.diskIsAttached
|
||||
|
||||
if expected.diskName == "" && expected.instanceID == "" {
|
||||
if expected.diskName == "" && expected.nodeName == "" {
|
||||
// testcase.diskIsAttached looks uninitialized, test did not expect to
|
||||
// call DiskIsAttached
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call!")
|
||||
@ -305,12 +307,12 @@ func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, err
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong instanceID")
|
||||
if expected.nodeName != nodeName {
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, instanceID, expected.isAttached, expected.ret)
|
||||
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)
|
||||
|
||||
return expected.isAttached, expected.ret
|
||||
}
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/exec"
|
||||
"k8s.io/kubernetes/pkg/util/keymutex"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
@ -65,23 +66,23 @@ func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Attach attaches a volume.Spec to an Azure VM referenced by hostname, returning the disk's LUN
func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, hostName string) (string, error) {
// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
|
||||
func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
|
||||
volumeSource, err := getVolumeSource(spec)
|
||||
if err != nil {
|
||||
glog.Warningf("failed to get azure disk spec")
|
||||
return "", err
|
||||
}
|
||||
instanceid, err := attacher.azureProvider.InstanceID(hostName)
|
||||
instanceid, err := attacher.azureProvider.InstanceID(nodeName)
|
||||
if err != nil {
|
||||
glog.Warningf("failed to get azure instance id")
|
||||
return "", fmt.Errorf("failed to get azure instance id for host %q", hostName)
|
||||
return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName)
|
||||
}
|
||||
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
|
||||
instanceid = instanceid[(ind + 1):]
|
||||
}
|
||||
|
||||
lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, instanceid)
|
||||
lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
// Log error and continue with attach
|
||||
glog.Warningf(
|
||||
@ -96,15 +97,15 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, hostName string) (s
|
||||
getLunMutex.LockKey(instanceid)
|
||||
defer getLunMutex.UnlockKey(instanceid)
|
||||
|
||||
lun, err = attacher.azureProvider.GetNextDiskLun(instanceid)
|
||||
lun, err = attacher.azureProvider.GetNextDiskLun(nodeName)
|
||||
if err != nil {
|
||||
glog.Warningf("no LUN available for instance %q", instanceid)
|
||||
glog.Warningf("no LUN available for instance %q", nodeName)
|
||||
return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid)
|
||||
}
|
||||
|
||||
err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, instanceid, lun, compute.CachingTypes(*volumeSource.CachingMode))
|
||||
err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
|
||||
if err == nil {
|
||||
glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, instanceid)
|
||||
glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
|
||||
} else {
|
||||
glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err)
|
||||
return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err)
|
||||
@ -213,21 +214,21 @@ func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
|
||||
}
|
||||
|
||||
// Detach detaches disk from Azure VM.
|
||||
func (detacher *azureDiskDetacher) Detach(diskName string, hostName string) error {
|
||||
func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeName) error {
|
||||
if diskName == "" {
|
||||
return fmt.Errorf("invalid disk to detach: %q", diskName)
|
||||
}
|
||||
instanceid, err := detacher.azureProvider.InstanceID(hostName)
|
||||
instanceid, err := detacher.azureProvider.InstanceID(nodeName)
|
||||
if err != nil {
|
||||
glog.Warningf("no instance id for host %q, skip detaching", hostName)
|
||||
glog.Warningf("no instance id for node %q, skip detaching", nodeName)
|
||||
return nil
|
||||
}
|
||||
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
|
||||
instanceid = instanceid[(ind + 1):]
|
||||
}
|
||||
|
||||
glog.V(4).Infof("detach %v from host %q", diskName, instanceid)
|
||||
err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, instanceid)
|
||||
glog.V(4).Infof("detach %v from node %q", diskName, nodeName)
|
||||
err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to detach azure disk %q, err %v", diskName, err)
|
||||
}
|
||||
|
@ -50,15 +50,15 @@ type azureDataDiskPlugin struct {
|
||||
// azure cloud provider should implement it
|
||||
type azureCloudProvider interface {
|
||||
// Attaches the disk to the host machine.
|
||||
AttachDisk(diskName, diskUri, vmName string, lun int32, cachingMode compute.CachingTypes) error
|
||||
AttachDisk(diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
|
||||
// Detaches the disk, identified by disk name or uri, from the host machine.
|
||||
DetachDiskByName(diskName, diskUri, vmName string) error
|
||||
DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error
|
||||
// Get the LUN number of the disk that is attached to the host
|
||||
GetDiskLun(diskName, diskUri, vmName string) (int32, error)
|
||||
GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
|
||||
// Get the next available LUN number to attach a new VHD
|
||||
GetNextDiskLun(vmName string) (int32, error)
|
||||
GetNextDiskLun(nodeName types.NodeName) (int32, error)
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
InstanceID(name string) (string, error)
|
||||
InstanceID(nodeName types.NodeName) (string, error)
|
||||
}
|
||||
|
||||
var _ volume.VolumePlugin = &azureDataDiskPlugin{}
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/exec"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
@ -59,7 +60,7 @@ func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string
|
||||
return mount.GetMountRefs(mounter, deviceMountPath)
|
||||
}
|
||||
|
||||
func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, hostName string) (string, error) {
|
||||
func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
|
||||
volumeSource, _, err := getVolumeSource(spec)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -71,7 +72,7 @@ func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, hostName string) (
|
||||
if !res {
|
||||
return "", fmt.Errorf("failed to list openstack instances")
|
||||
}
|
||||
instanceid, err := instances.InstanceID(hostName)
|
||||
instanceid, err := instances.InstanceID(nodeName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -208,13 +209,13 @@ func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (detacher *cinderDiskDetacher) Detach(deviceMountPath string, hostName string) error {
|
||||
func (detacher *cinderDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
|
||||
volumeID := path.Base(deviceMountPath)
|
||||
instances, res := detacher.cinderProvider.Instances()
|
||||
if !res {
|
||||
return fmt.Errorf("failed to list openstack instances")
|
||||
}
|
||||
instanceid, err := instances.InstanceID(hostName)
|
||||
instanceid, err := instances.InstanceID(nodeName)
|
||||
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
|
||||
instanceid = instanceid[(ind + 1):]
|
||||
}
|
||||
@ -224,12 +225,12 @@ func (detacher *cinderDiskDetacher) Detach(deviceMountPath string, hostName stri
|
||||
// Log error and continue with detach
|
||||
glog.Errorf(
|
||||
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
|
||||
volumeID, hostName, err)
|
||||
volumeID, nodeName, err)
|
||||
}
|
||||
|
||||
if err == nil && !attached {
|
||||
// Volume is already detached from node.
|
||||
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, hostName)
|
||||
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
||||
func TestGetDeviceName_Volume(t *testing.T) {
|
||||
@ -77,6 +78,7 @@ type testcase struct {
|
||||
func TestAttachDetach(t *testing.T) {
|
||||
diskName := "disk"
|
||||
instanceID := "instance"
|
||||
nodeName := types.NodeName(instanceID)
|
||||
readOnly := false
|
||||
spec := createVolSpec(diskName, readOnly)
|
||||
attachError := errors.New("Fake attach error")
|
||||
@ -93,7 +95,7 @@ func TestAttachDetach(t *testing.T) {
|
||||
diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, instanceID)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedDevice: "/dev/sda",
|
||||
},
|
||||
@ -106,7 +108,7 @@ func TestAttachDetach(t *testing.T) {
|
||||
diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, instanceID)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedDevice: "/dev/sda",
|
||||
},
|
||||
@ -120,7 +122,7 @@ func TestAttachDetach(t *testing.T) {
|
||||
diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, instanceID)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedDevice: "/dev/sda",
|
||||
},
|
||||
@ -133,7 +135,7 @@ func TestAttachDetach(t *testing.T) {
|
||||
attach: attachCall{diskName, instanceID, "/dev/sda", attachError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, instanceID)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedError: attachError,
|
||||
},
|
||||
@ -147,7 +149,7 @@ func TestAttachDetach(t *testing.T) {
|
||||
diskPath: diskPathCall{diskName, instanceID, "", diskPathError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, instanceID)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedError: diskPathError,
|
||||
},
|
||||
@ -160,7 +162,7 @@ func TestAttachDetach(t *testing.T) {
|
||||
detach: detachCall{diskName, instanceID, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, instanceID)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
@ -171,7 +173,7 @@ func TestAttachDetach(t *testing.T) {
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, instanceID)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
@ -183,7 +185,7 @@ func TestAttachDetach(t *testing.T) {
|
||||
detach: detachCall{diskName, instanceID, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, instanceID)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
@ -195,7 +197,7 @@ func TestAttachDetach(t *testing.T) {
|
||||
detach: detachCall{diskName, instanceID, detachError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, instanceID)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
expectedError: detachError,
|
||||
},
|
||||
@ -420,30 +422,30 @@ type instances struct {
|
||||
instanceID string
|
||||
}
|
||||
|
||||
func (instances *instances) NodeAddresses(name string) ([]api.NodeAddress, error) {
|
||||
func (instances *instances) NodeAddresses(name types.NodeName) ([]api.NodeAddress, error) {
|
||||
return []api.NodeAddress{}, errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) ExternalID(name string) (string, error) {
|
||||
func (instances *instances) ExternalID(name types.NodeName) (string, error) {
|
||||
return "", errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) InstanceID(name string) (string, error) {
|
||||
func (instances *instances) InstanceID(name types.NodeName) (string, error) {
|
||||
return instances.instanceID, nil
|
||||
}
|
||||
|
||||
func (instances *instances) InstanceType(name string) (string, error) {
|
||||
func (instances *instances) InstanceType(name types.NodeName) (string, error) {
|
||||
return "", errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) List(filter string) ([]string, error) {
|
||||
return []string{}, errors.New("Not implemented")
|
||||
func (instances *instances) List(filter string) ([]types.NodeName, error) {
|
||||
return []types.NodeName{}, errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
return errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) CurrentNodeName(hostname string) (string, error) {
|
||||
func (instances *instances) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
return "", errors.New("Not implemented")
|
||||
}
|
||||
|
@ -26,6 +26,7 @@ import (

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/sets"
@ -60,13 +61,13 @@ func (plugin *gcePersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string
}

// Attach checks with the GCE cloud provider if the specified volume is already
// attached to the specified node. If the volume is attached, it succeeds
// (returns nil). If it is not, Attach issues a call to the GCE cloud provider
// to attach it.
// Callers are responsible for retryinging on failure.
// attached to the node with the specified Name.
// If the volume is attached, it succeeds (returns nil).
// If it is not, Attach issues a call to the GCE cloud provider to attach it.
// Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and
// detach operations.
func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, hostName string) (string, error) {
func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return "", err
@ -74,20 +75,20 @@ func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, hostName st

pdName := volumeSource.PDName

attached, err := attacher.gceDisks.DiskIsAttached(pdName, hostName)
attached, err := attacher.gceDisks.DiskIsAttached(pdName, nodeName)
if err != nil {
// Log error and continue with attach
glog.Errorf(
"Error checking if PD (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v",
pdName, hostName, err)
pdName, nodeName, err)
}

if err == nil && attached {
// Volume is already attached to node.
glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, hostName)
glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName)
} else {
if err := attacher.gceDisks.AttachDisk(pdName, hostName, readOnly); err != nil {
glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, hostName, err)
if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly); err != nil {
glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err)
return "", err
}
}
@ -210,25 +211,25 @@ func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) {
// Callers are responsible for retryinging on failure.
// Callers are responsible for thread safety between concurrent attach and detach
// operations.
func (detacher *gcePersistentDiskDetacher) Detach(deviceMountPath string, hostName string) error {
func (detacher *gcePersistentDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
pdName := path.Base(deviceMountPath)

attached, err := detacher.gceDisks.DiskIsAttached(pdName, hostName)
attached, err := detacher.gceDisks.DiskIsAttached(pdName, nodeName)
if err != nil {
// Log error and continue with detach
glog.Errorf(
"Error checking if PD (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
pdName, hostName, err)
pdName, nodeName, err)
}

if err == nil && !attached {
// Volume is not attached to node. Success!
glog.Infof("Detach operation is successful. PD %q was not attached to node %q.", pdName, hostName)
glog.Infof("Detach operation is successful. PD %q was not attached to node %q.", pdName, nodeName)
return nil
}

if err = detacher.gceDisks.DetachDisk(pdName, hostName); err != nil {
glog.Errorf("Error detaching PD %q from node %q: %v", pdName, hostName, err)
if err = detacher.gceDisks.DetachDisk(pdName, nodeName); err != nil {
glog.Errorf("Error detaching PD %q from node %q: %v", pdName, nodeName, err)
return err
}

@ -26,6 +26,7 @@ import (
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
||||
func TestGetDeviceName_Volume(t *testing.T) {
|
||||
@ -73,7 +74,7 @@ type testcase struct {
|
||||
|
||||
func TestAttachDetach(t *testing.T) {
|
||||
diskName := "disk"
|
||||
instanceID := "instance"
|
||||
nodeName := types.NodeName("instance")
|
||||
readOnly := false
|
||||
spec := createVolSpec(diskName, readOnly)
|
||||
attachError := errors.New("Fake attach error")
|
||||
@ -83,11 +84,11 @@ func TestAttachDetach(t *testing.T) {
|
||||
// Successful Attach call
|
||||
{
|
||||
name: "Attach_Positive",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil},
|
||||
attach: attachCall{diskName, instanceID, readOnly, nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
|
||||
attach: attachCall{diskName, nodeName, readOnly, nil},
|
||||
test: func(testcase *testcase) error {
|
||||
attacher := newAttacher(testcase)
|
||||
devicePath, err := attacher.Attach(spec, instanceID)
|
||||
devicePath, err := attacher.Attach(spec, nodeName)
|
||||
if devicePath != "/dev/disk/by-id/google-disk" {
|
||||
return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
|
||||
}
|
||||
@ -98,10 +99,10 @@ func TestAttachDetach(t *testing.T) {
|
||||
// Disk is already attached
|
||||
{
|
||||
name: "Attach_Positive_AlreadyAttached",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, true, nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
|
||||
test: func(testcase *testcase) error {
|
||||
attacher := newAttacher(testcase)
|
||||
devicePath, err := attacher.Attach(spec, instanceID)
|
||||
devicePath, err := attacher.Attach(spec, nodeName)
|
||||
if devicePath != "/dev/disk/by-id/google-disk" {
|
||||
return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
|
||||
}
|
||||
@ -112,11 +113,11 @@ func TestAttachDetach(t *testing.T) {
|
||||
// DiskIsAttached fails and Attach succeeds
|
||||
{
|
||||
name: "Attach_Positive_CheckFails",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
|
||||
attach: attachCall{diskName, instanceID, readOnly, nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
|
||||
attach: attachCall{diskName, nodeName, readOnly, nil},
|
||||
test: func(testcase *testcase) error {
|
||||
attacher := newAttacher(testcase)
|
||||
devicePath, err := attacher.Attach(spec, instanceID)
|
||||
devicePath, err := attacher.Attach(spec, nodeName)
|
||||
if devicePath != "/dev/disk/by-id/google-disk" {
|
||||
return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
|
||||
}
|
||||
@ -127,11 +128,11 @@ func TestAttachDetach(t *testing.T) {
|
||||
// Attach call fails
|
||||
{
|
||||
name: "Attach_Negative",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
|
||||
attach: attachCall{diskName, instanceID, readOnly, attachError},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
|
||||
attach: attachCall{diskName, nodeName, readOnly, attachError},
|
||||
test: func(testcase *testcase) error {
|
||||
attacher := newAttacher(testcase)
|
||||
devicePath, err := attacher.Attach(spec, instanceID)
|
||||
devicePath, err := attacher.Attach(spec, nodeName)
|
||||
if devicePath != "" {
|
||||
return fmt.Errorf("devicePath incorrect. Expected<\"\"> Actual: <%q>", devicePath)
|
||||
}
|
||||
@ -143,43 +144,43 @@ func TestAttachDetach(t *testing.T) {
|
||||
// Detach succeeds
|
||||
{
|
||||
name: "Detach_Positive",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, true, nil},
|
||||
detach: detachCall{diskName, instanceID, nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
|
||||
detach: detachCall{diskName, nodeName, nil},
|
||||
test: func(testcase *testcase) error {
|
||||
detacher := newDetacher(testcase)
|
||||
return detacher.Detach(diskName, instanceID)
|
||||
return detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Disk is already detached
|
||||
{
|
||||
name: "Detach_Positive_AlreadyDetached",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
|
||||
test: func(testcase *testcase) error {
|
||||
detacher := newDetacher(testcase)
|
||||
return detacher.Detach(diskName, instanceID)
|
||||
return detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Detach succeeds when DiskIsAttached fails
|
||||
{
|
||||
name: "Detach_Positive_CheckFails",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
|
||||
detach: detachCall{diskName, instanceID, nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
|
||||
detach: detachCall{diskName, nodeName, nil},
|
||||
test: func(testcase *testcase) error {
|
||||
detacher := newDetacher(testcase)
|
||||
return detacher.Detach(diskName, instanceID)
|
||||
return detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Detach fails
|
||||
{
|
||||
name: "Detach_Negative",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
|
||||
detach: detachCall{diskName, instanceID, detachError},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
|
||||
detach: detachCall{diskName, nodeName, detachError},
|
||||
test: func(testcase *testcase) error {
|
||||
detacher := newDetacher(testcase)
|
||||
return detacher.Detach(diskName, instanceID)
|
||||
return detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
expectedReturn: detachError,
|
||||
},
|
||||
@ -253,28 +254,29 @@ func createPVSpec(name string, readOnly bool) *volume.Spec {
|
||||
// Fake GCE implementation
|
||||
|
||||
type attachCall struct {
|
||||
diskName string
|
||||
instanceID string
|
||||
readOnly bool
|
||||
ret error
|
||||
diskName string
|
||||
nodeName types.NodeName
|
||||
readOnly bool
|
||||
ret error
|
||||
}
|
||||
|
||||
type detachCall struct {
|
||||
devicePath string
|
||||
instanceID string
|
||||
nodeName types.NodeName
|
||||
ret error
|
||||
}
|
||||
|
||||
type diskIsAttachedCall struct {
|
||||
diskName, instanceID string
|
||||
isAttached bool
|
||||
ret error
|
||||
diskName string
|
||||
nodeName types.NodeName
|
||||
isAttached bool
|
||||
ret error
|
||||
}
|
||||
|
||||
func (testcase *testcase) AttachDisk(diskName, instanceID string, readOnly bool) error {
|
||||
func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
|
||||
expected := &testcase.attach
|
||||
|
||||
if expected.diskName == "" && expected.instanceID == "" {
|
||||
if expected.diskName == "" && expected.nodeName == "" {
|
||||
// testcase.attach looks uninitialized, test did not expect to call
|
||||
// AttachDisk
|
||||
testcase.t.Errorf("Unexpected AttachDisk call!")
|
||||
@ -286,9 +288,9 @@ func (testcase *testcase) AttachDisk(diskName, instanceID string, readOnly bool)
|
||||
return errors.New("Unexpected AttachDisk call: wrong diskName")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return errors.New("Unexpected AttachDisk call: wrong instanceID")
|
||||
if expected.nodeName != nodeName {
|
||||
testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
|
||||
return errors.New("Unexpected AttachDisk call: wrong nodeName")
|
||||
}
|
||||
|
||||
if expected.readOnly != readOnly {
|
||||
@ -296,15 +298,15 @@ func (testcase *testcase) AttachDisk(diskName, instanceID string, readOnly bool)
|
||||
return errors.New("Unexpected AttachDisk call: wrong readOnly")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, instanceID, readOnly, expected.ret)
|
||||
glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, nodeName, readOnly, expected.ret)
|
||||
|
||||
return expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) DetachDisk(devicePath, instanceID string) error {
|
||||
func (testcase *testcase) DetachDisk(devicePath string, nodeName types.NodeName) error {
|
||||
expected := &testcase.detach
|
||||
|
||||
if expected.devicePath == "" && expected.instanceID == "" {
|
||||
if expected.devicePath == "" && expected.nodeName == "" {
|
||||
// testcase.detach looks uninitialized, test did not expect to call
|
||||
// DetachDisk
|
||||
testcase.t.Errorf("Unexpected DetachDisk call!")
|
||||
@ -316,20 +318,20 @@ func (testcase *testcase) DetachDisk(devicePath, instanceID string) error {
|
||||
return errors.New("Unexpected DetachDisk call: wrong diskName")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return errors.New("Unexpected DetachDisk call: wrong instanceID")
|
||||
if expected.nodeName != nodeName {
|
||||
testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
|
||||
return errors.New("Unexpected DetachDisk call: wrong nodeName")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", devicePath, instanceID, expected.ret)
|
||||
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", devicePath, nodeName, expected.ret)
|
||||
|
||||
return expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, error) {
|
||||
func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
|
||||
expected := &testcase.diskIsAttached
|
||||
|
||||
if expected.diskName == "" && expected.instanceID == "" {
|
||||
if expected.diskName == "" && expected.nodeName == "" {
|
||||
// testcase.diskIsAttached looks uninitialized, test did not expect to
|
||||
// call DiskIsAttached
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call!")
|
||||
@ -341,12 +343,12 @@ func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, err
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong instanceID")
|
||||
if expected.nodeName != nodeName {
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, instanceID, expected.isAttached, expected.ret)
|
||||
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)
|
||||
|
||||
return expected.isAttached, expected.ret
|
||||
}
|
||||
|
@ -370,7 +370,7 @@ func (fv *FakeVolume) TearDownAt(dir string) error {
return os.RemoveAll(dir)
}

func (fv *FakeVolume) Attach(spec *Spec, hostName string) (string, error) {
func (fv *FakeVolume) Attach(spec *Spec, nodeName types.NodeName) (string, error) {
fv.Lock()
defer fv.Unlock()
fv.AttachCallCount++
@ -416,7 +416,7 @@ func (fv *FakeVolume) GetMountDeviceCallCount() int {
return fv.MountDeviceCallCount
}

func (fv *FakeVolume) Detach(deviceMountPath string, hostName string) error {
func (fv *FakeVolume) Detach(deviceMountPath string, nodeName types.NodeName) error {
fv.Lock()
defer fv.Unlock()
fv.DetachCallCount++
@ -101,7 +101,7 @@ type OperationExecutor interface {
// If the volume is not found or there is an error (fetching the node
// object, for example) then an error is returned which triggers exponential
// back off on retries.
VerifyControllerAttachedVolume(volumeToMount VolumeToMount, nodeName string, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
VerifyControllerAttachedVolume(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error

// IsOperationPending returns true if an operation for the given volumeName and podName is pending,
// otherwise it returns false
@ -149,18 +149,18 @@ type ActualStateOfWorldAttacherUpdater interface {
// TODO: in the future, we should be able to remove the volumeName
// argument to this method -- since it is used only for attachable
// volumes. See issue 29695.
MarkVolumeAsAttached(volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName, devicePath string) error
MarkVolumeAsAttached(volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error

// Marks the specified volume as detached from the specified node
MarkVolumeAsDetached(volumeName api.UniqueVolumeName, nodeName string)
MarkVolumeAsDetached(volumeName api.UniqueVolumeName, nodeName types.NodeName)

// Marks desire to detach the specified volume (remove the volume from the node's
// volumesToReportedAsAttached list)
RemoveVolumeFromReportAsAttached(volumeName api.UniqueVolumeName, nodeName string) error
RemoveVolumeFromReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName) error

// Unmarks the desire to detach for the specified volume (add the volume back to
// the node's volumesToReportedAsAttached list)
AddVolumeToReportAsAttached(volumeName api.UniqueVolumeName, nodeName string)
AddVolumeToReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName)
}

// VolumeToAttach represents a volume that should be attached to a node.
@ -175,7 +175,7 @@ type VolumeToAttach struct {

// NodeName is the identifier for the node that the volume should be
// attached to.
NodeName string
NodeName types.NodeName

// scheduledPods is a map containing the set of pods that reference this
// volume and are scheduled to the underlying node. The key in the map is
@ -234,7 +234,7 @@ type AttachedVolume struct {
VolumeSpec *volume.Spec

// NodeName is the identifier for the node that the volume is attached to.
NodeName string
NodeName types.NodeName

// PluginIsAttachable indicates that the plugin for this volume implements
// the volume.Attacher interface
@ -453,7 +453,7 @@ func (oe *operationExecutor) UnmountDevice(

func (oe *operationExecutor) VerifyControllerAttachedVolume(
volumeToMount VolumeToMount,
nodeName string,
nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
verifyControllerAttachedVolumeFunc, err :=
oe.generateVerifyControllerAttachedVolumeFunc(volumeToMount, nodeName, actualStateOfWorld)
@ -605,7 +605,7 @@ func (oe *operationExecutor) generateDetachVolumeFunc(
func (oe *operationExecutor) verifyVolumeIsSafeToDetach(
volumeToDetach AttachedVolume) error {
// Fetch current node object
node, fetchErr := oe.kubeClient.Core().Nodes().Get(volumeToDetach.NodeName)
node, fetchErr := oe.kubeClient.Core().Nodes().Get(string(volumeToDetach.NodeName))
if fetchErr != nil {
if errors.IsNotFound(fetchErr) {
glog.Warningf("Node %q not found on API server. DetachVolume will skip safe to detach check.",
@ -1001,7 +1001,7 @@ func (oe *operationExecutor) generateUnmountDeviceFunc(

func (oe *operationExecutor) generateVerifyControllerAttachedVolumeFunc(
volumeToMount VolumeToMount,
nodeName string,
nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) {
return func() error {
if !volumeToMount.PluginIsAttachable {
@ -1040,7 +1040,7 @@ func (oe *operationExecutor) generateVerifyControllerAttachedVolumeFunc(
}

// Fetch current node object
node, fetchErr := oe.kubeClient.Core().Nodes().Get(nodeName)
node, fetchErr := oe.kubeClient.Core().Nodes().Get(string(nodeName))
if fetchErr != nil {
// On failure, return error. Caller will log and retry.
return fmt.Errorf(
@ -24,6 +24,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/types"
)

// Volume represents a directory used by pods or hosts on a node. All method
@ -140,10 +141,10 @@ type Deleter interface {

// Attacher can attach a volume to a node.
type Attacher interface {
// Attaches the volume specified by the given spec to the given host.
// Attaches the volume specified by the given spec to the node with the given Name.
// On success, returns the device path where the device was attached on the
// node.
Attach(spec *Spec, hostName string) (string, error)
Attach(spec *Spec, nodeName types.NodeName) (string, error)

// WaitForAttach blocks until the device is attached to this
// node. If it successfully attaches, the path to the device
@ -163,8 +164,8 @@ type Attacher interface {

// Detacher can detach a volume from a node.
type Detacher interface {
// Detach the given device from the given host.
Detach(deviceName, hostName string) error
// Detach the given device from the node with the given Name.
Detach(deviceName string, nodeName types.NodeName) error

// WaitForDetach blocks until the device is detached from this
// node. If the device does not detach within the given timeout
@ -24,6 +24,7 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/exec"
|
||||
"k8s.io/kubernetes/pkg/util/keymutex"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
@ -60,21 +61,21 @@ func (plugin *vsphereVolumePlugin) NewAttacher() (volume.Attacher, error) {
|
||||
// Callers are responsible for retryinging on failure.
|
||||
// Callers are responsible for thread safety between concurrent attach and
|
||||
// detach operations.
|
||||
func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, hostName string) (string, error) {
|
||||
func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
|
||||
volumeSource, _, err := getVolumeSource(spec)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("vSphere: Attach disk called for host %s", hostName)
|
||||
glog.V(4).Infof("vSphere: Attach disk called for node %s", nodeName)
|
||||
|
||||
// Keeps concurrent attach operations to same host atomic
|
||||
attachdetachMutex.LockKey(hostName)
|
||||
defer attachdetachMutex.UnlockKey(hostName)
|
||||
attachdetachMutex.LockKey(string(nodeName))
|
||||
defer attachdetachMutex.UnlockKey(string(nodeName))
|
||||
|
||||
// vsphereCloud.AttachDisk checks if disk is already attached to host and
|
||||
// succeeds in that case, so no need to do that separately.
|
||||
_, diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, hostName)
|
||||
_, diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Error attaching volume %q: %+v", volumeSource.VolumePath, err)
|
||||
return "", err
|
||||
@ -190,27 +191,27 @@ func (plugin *vsphereVolumePlugin) NewDetacher() (volume.Detacher, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Detach the given device from the given host.
|
||||
func (detacher *vsphereVMDKDetacher) Detach(deviceMountPath string, hostName string) error {
|
||||
// Detach the given device from the given node.
|
||||
func (detacher *vsphereVMDKDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
|
||||
|
||||
volPath := getVolPathfromDeviceMountPath(deviceMountPath)
|
||||
attached, err := detacher.vsphereVolumes.DiskIsAttached(volPath, hostName)
|
||||
attached, err := detacher.vsphereVolumes.DiskIsAttached(volPath, nodeName)
|
||||
if err != nil {
|
||||
// Log error and continue with detach
|
||||
glog.Errorf(
|
||||
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
|
||||
volPath, hostName, err)
|
||||
volPath, nodeName, err)
|
||||
}
|
||||
|
||||
if err == nil && !attached {
|
||||
// Volume is already detached from node.
|
||||
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volPath, hostName)
|
||||
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volPath, nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
attachdetachMutex.LockKey(hostName)
|
||||
defer attachdetachMutex.UnlockKey(hostName)
|
||||
if err := detacher.vsphereVolumes.DetachDisk(volPath, hostName); err != nil {
|
||||
attachdetachMutex.LockKey(string(nodeName))
|
||||
defer attachdetachMutex.UnlockKey(string(nodeName))
|
||||
if err := detacher.vsphereVolumes.DetachDisk(volPath, nodeName); err != nil {
|
||||
glog.Errorf("Error detaching volume %q: %v", volPath, err)
|
||||
return err
|
||||
}
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
||||
func TestGetDeviceName_Volume(t *testing.T) {
|
||||
@ -75,7 +76,7 @@ type testcase struct {
|
||||
func TestAttachDetach(t *testing.T) {
|
||||
uuid := "00000000000000"
|
||||
diskName := "[local] volumes/test"
|
||||
hostName := "host"
|
||||
nodeName := types.NodeName("host")
|
||||
spec := createVolSpec(diskName)
|
||||
attachError := errors.New("Fake attach error")
|
||||
detachError := errors.New("Fake detach error")
|
||||
@ -84,10 +85,10 @@ func TestAttachDetach(t *testing.T) {
|
||||
// Successful Attach call
|
||||
{
|
||||
name: "Attach_Positive",
|
||||
attach: attachCall{diskName, hostName, uuid, nil},
|
||||
attach: attachCall{diskName, nodeName, uuid, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, hostName)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedDevice: "/dev/disk/by-id/wwn-0x" + uuid,
|
||||
},
|
||||
@ -95,10 +96,10 @@ func TestAttachDetach(t *testing.T) {
|
||||
// Attach call fails
|
||||
{
|
||||
name: "Attach_Negative",
|
||||
attach: attachCall{diskName, hostName, "", attachError},
|
||||
attach: attachCall{diskName, nodeName, "", attachError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, hostName)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedError: attachError,
|
||||
},
|
||||
@ -106,43 +107,43 @@ func TestAttachDetach(t *testing.T) {
|
||||
// Detach succeeds
|
||||
{
|
||||
name: "Detach_Positive",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, hostName, true, nil},
|
||||
detach: detachCall{diskName, hostName, nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
|
||||
detach: detachCall{diskName, nodeName, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, hostName)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Disk is already detached
|
||||
{
|
||||
name: "Detach_Positive_AlreadyDetached",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, hostName, false, nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, hostName)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Detach succeeds when DiskIsAttached fails
|
||||
{
|
||||
name: "Detach_Positive_CheckFails",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, hostName, false, diskCheckError},
|
||||
detach: detachCall{diskName, hostName, nil},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
|
||||
detach: detachCall{diskName, nodeName, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, hostName)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Detach fails
|
||||
{
|
||||
name: "Detach_Negative",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, hostName, false, diskCheckError},
|
||||
detach: detachCall{diskName, hostName, detachError},
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
|
||||
detach: detachCall{diskName, nodeName, detachError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(diskName, hostName)
|
||||
return "", detacher.Detach(diskName, nodeName)
|
||||
},
|
||||
expectedError: detachError,
|
||||
},
|
||||
@ -214,27 +215,28 @@ func createPVSpec(name string) *volume.Spec {
|
||||
|
||||
type attachCall struct {
|
||||
diskName string
|
||||
hostName string
|
||||
nodeName types.NodeName
|
||||
retDeviceUUID string
|
||||
ret error
|
||||
}
|
||||
|
||||
type detachCall struct {
|
||||
diskName string
|
||||
hostName string
|
||||
nodeName types.NodeName
|
||||
ret error
|
||||
}
|
||||
|
||||
type diskIsAttachedCall struct {
|
||||
diskName, hostName string
|
||||
isAttached bool
|
||||
ret error
|
||||
diskName string
|
||||
nodeName types.NodeName
|
||||
isAttached bool
|
||||
ret error
|
||||
}
|
||||
|
||||
func (testcase *testcase) AttachDisk(diskName string, hostName string) (string, string, error) {
|
||||
func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName) (string, string, error) {
|
||||
expected := &testcase.attach
|
||||
|
||||
if expected.diskName == "" && expected.hostName == "" {
|
||||
if expected.diskName == "" && expected.nodeName == "" {
|
||||
// testcase.attach looks uninitialized, test did not expect to call
|
||||
// AttachDisk
|
||||
testcase.t.Errorf("Unexpected AttachDisk call!")
|
||||
@ -246,20 +248,20 @@ func (testcase *testcase) AttachDisk(diskName string, hostName string) (string,
|
||||
return "", "", errors.New("Unexpected AttachDisk call: wrong diskName")
|
||||
}
|
||||
|
||||
if expected.hostName != hostName {
|
||||
testcase.t.Errorf("Unexpected AttachDisk call: expected hostName %s, got %s", expected.hostName, hostName)
|
||||
return "", "", errors.New("Unexpected AttachDisk call: wrong hostName")
|
||||
if expected.nodeName != nodeName {
|
||||
testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
|
||||
return "", "", errors.New("Unexpected AttachDisk call: wrong nodeName")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, hostName, expected.retDeviceUUID, expected.ret)
|
||||
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceUUID, expected.ret)
|
||||
|
||||
return "", expected.retDeviceUUID, expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) DetachDisk(diskName string, hostName string) error {
|
||||
func (testcase *testcase) DetachDisk(diskName string, nodeName types.NodeName) error {
|
||||
expected := &testcase.detach
|
||||
|
||||
if expected.diskName == "" && expected.hostName == "" {
|
||||
if expected.diskName == "" && expected.nodeName == "" {
|
||||
// testcase.detach looks uninitialized, test did not expect to call
|
||||
// DetachDisk
|
||||
testcase.t.Errorf("Unexpected DetachDisk call!")
|
||||
@ -271,20 +273,20 @@ func (testcase *testcase) DetachDisk(diskName string, hostName string) error {
|
||||
return errors.New("Unexpected DetachDisk call: wrong diskName")
|
||||
}
|
||||
|
||||
if expected.hostName != hostName {
|
||||
testcase.t.Errorf("Unexpected DetachDisk call: expected hostname %s, got %s", expected.hostName, hostName)
|
||||
return errors.New("Unexpected DetachDisk call: wrong hostname")
|
||||
if expected.nodeName != nodeName {
|
||||
testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
|
||||
return errors.New("Unexpected DetachDisk call: wrong nodeName")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", diskName, hostName, expected.ret)
|
||||
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", diskName, nodeName, expected.ret)
|
||||
|
||||
return expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) DiskIsAttached(diskName, hostName string) (bool, error) {
|
||||
func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
|
||||
expected := &testcase.diskIsAttached
|
||||
|
||||
if expected.diskName == "" && expected.hostName == "" {
|
||||
if expected.diskName == "" && expected.nodeName == "" {
|
||||
// testcase.diskIsAttached looks uninitialized, test did not expect to
|
||||
// call DiskIsAttached
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call!")
|
||||
@ -296,12 +298,12 @@ func (testcase *testcase) DiskIsAttached(diskName, hostName string) (bool, error
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
|
||||
}
|
||||
|
||||
if expected.hostName != hostName {
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call: expected hostName %s, got %s", expected.hostName, hostName)
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong hostName")
|
||||
if expected.nodeName != nodeName {
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, hostName, expected.isAttached, expected.ret)
|
||||
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)
|
||||
|
||||
return expected.isAttached, expected.ret
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
||||
type mockVolumes struct {
|
||||
@ -33,11 +34,11 @@ type mockVolumes struct {
|
||||
|
||||
var _ aws.Volumes = &mockVolumes{}
|
||||
|
||||
func (v *mockVolumes) AttachDisk(diskName string, instanceName string, readOnly bool) (string, error) {
|
||||
func (v *mockVolumes) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) (string, error) {
|
||||
return "", fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (v *mockVolumes) DetachDisk(diskName string, instanceName string) (string, error) {
|
||||
func (v *mockVolumes) DetachDisk(diskName string, nodeName types.NodeName) (string, error) {
|
||||
return "", fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
@ -57,7 +58,7 @@ func (c *mockVolumes) GetDiskPath(volumeName string) (string, error) {
|
||||
return "", fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (c *mockVolumes) DiskIsAttached(volumeName, instanceID string) (bool, error) {
|
||||
func (c *mockVolumes) DiskIsAttached(volumeName string, nodeName types.NodeName) (bool, error) {
|
||||
return false, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
|
@ -37,6 +37,7 @@ import (
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
@ -54,8 +55,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
||||
var (
|
||||
podClient client.PodInterface
|
||||
nodeClient client.NodeInterface
|
||||
host0Name string
|
||||
host1Name string
|
||||
host0Name types.NodeName
|
||||
host1Name types.NodeName
|
||||
)
|
||||
f := framework.NewDefaultFramework("pod-disks")
|
||||
|
||||
@ -68,8 +69,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
||||
|
||||
Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")
|
||||
|
||||
host0Name = nodes.Items[0].ObjectMeta.Name
|
||||
host1Name = nodes.Items[1].ObjectMeta.Name
|
||||
host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name)
|
||||
host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)
|
||||
|
||||
mathrand.Seed(time.Now().UTC().UnixNano())
|
||||
})
|
||||
@ -91,7 +92,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
||||
By("cleaning up PD-RW test environment")
|
||||
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
|
||||
podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
|
||||
detachAndDeletePDs(diskName, []string{host0Name, host1Name})
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
|
||||
}()
|
||||
|
||||
By("submitting host0Pod to kubernetes")
|
||||
@ -155,7 +156,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
||||
By("cleaning up PD-RW test environment")
|
||||
podClient.Delete(host0Pod.Name, &api.DeleteOptions{})
|
||||
podClient.Delete(host1Pod.Name, &api.DeleteOptions{})
|
||||
detachAndDeletePDs(diskName, []string{host0Name, host1Name})
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
|
||||
}()
|
||||
|
||||
By("submitting host0Pod to kubernetes")
|
||||
@ -220,7 +221,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
||||
podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
|
||||
podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
|
||||
podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))
|
||||
detachAndDeletePDs(diskName, []string{host0Name, host1Name})
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
|
||||
}()
|
||||
|
||||
By("submitting rwPod to ensure PD is formatted")
|
||||
@ -272,7 +273,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
||||
podClient.Delete(rwPod.Name, &api.DeleteOptions{})
|
||||
podClient.Delete(host0ROPod.Name, &api.DeleteOptions{})
|
||||
podClient.Delete(host1ROPod.Name, &api.DeleteOptions{})
|
||||
detachAndDeletePDs(diskName, []string{host0Name, host1Name})
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
|
||||
}()
|
||||
|
||||
By("submitting rwPod to ensure PD is formatted")
|
||||
@ -322,7 +323,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
||||
if host0Pod != nil {
|
||||
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
|
||||
}
|
||||
detachAndDeletePDs(diskName, []string{host0Name})
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
|
||||
}()
|
||||
|
||||
fileAndContentToVerify := make(map[string]string)
|
||||
@ -377,8 +378,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
||||
if host0Pod != nil {
|
||||
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
|
||||
}
|
||||
detachAndDeletePDs(disk1Name, []string{host0Name})
|
||||
detachAndDeletePDs(disk2Name, []string{host0Name})
|
||||
detachAndDeletePDs(disk1Name, []types.NodeName{host0Name})
|
||||
detachAndDeletePDs(disk2Name, []types.NodeName{host0Name})
|
||||
}()
|
||||
|
||||
containerName := "mycontainer"
|
||||
@ -535,16 +536,14 @@ func deletePD(pdName string) error {
|
||||
}
|
||||
}
|
||||
|
||||
func detachPD(hostName, pdName string) error {
|
||||
func detachPD(nodeName types.NodeName, pdName string) error {
|
||||
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
|
||||
instanceName := strings.Split(hostName, ".")[0]
|
||||
|
||||
gceCloud, err := getGCECloud()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = gceCloud.DetachDisk(pdName, instanceName)
|
||||
err = gceCloud.DetachDisk(pdName, nodeName)
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && strings.Contains(gerr.Message, "Invalid value for field 'disk'") {
|
||||
// PD already detached, ignore error.
|
||||
@ -575,7 +574,7 @@ func detachPD(hostName, pdName string) error {
|
||||
}
|
||||
}
|
||||
|
||||
func testPDPod(diskNames []string, targetHost string, readOnly bool, numContainers int) *api.Pod {
|
||||
func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, numContainers int) *api.Pod {
|
||||
containers := make([]api.Container, numContainers)
|
||||
for i := range containers {
|
||||
containers[i].Name = "mycontainer"
|
||||
@ -608,7 +607,7 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: containers,
|
||||
NodeName: targetHost,
|
||||
NodeName: string(targetNode),
|
||||
},
|
||||
}
|
||||
|
||||
@ -644,31 +643,31 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine
|
||||
}
|
||||
|
||||
// Waits for specified PD to to detach from specified hostName
|
||||
func waitForPDDetach(diskName, hostName string) error {
|
||||
func waitForPDDetach(diskName string, nodeName types.NodeName) error {
|
||||
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
|
||||
framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, hostName)
|
||||
framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
|
||||
gceCloud, err := getGCECloud()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) {
|
||||
diskAttached, err := gceCloud.DiskIsAttached(diskName, hostName)
|
||||
diskAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
|
||||
if err != nil {
|
||||
framework.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, hostName, err)
|
||||
framework.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, nodeName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !diskAttached {
|
||||
// Specified disk does not appear to be attached to specified node
|
||||
framework.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, hostName)
|
||||
framework.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
framework.Logf("Waiting for GCE PD %q to detach from %q.", diskName, hostName)
|
||||
framework.Logf("Waiting for GCE PD %q to detach from %q.", diskName, nodeName)
|
||||
}
|
||||
|
||||
return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, hostName, gcePDDetachTimeout)
|
||||
return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, nodeName, gcePDDetachTimeout)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -684,7 +683,7 @@ func getGCECloud() (*gcecloud.GCECloud, error) {
|
||||
return gceCloud, nil
|
||||
}
|
||||
|
||||
func detachAndDeletePDs(diskName string, hosts []string) {
|
||||
func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
|
||||
for _, host := range hosts {
|
||||
framework.Logf("Detaching GCE PD %q from node %q.", diskName, host)
|
||||
detachPD(host, diskName)
|
||||
@ -697,7 +696,8 @@ func detachAndDeletePDs(diskName string, hosts []string) {
|
||||
|
||||
func waitForPDInVolumesInUse(
|
||||
nodeClient client.NodeInterface,
|
||||
diskName, nodeName string,
|
||||
diskName string,
|
||||
nodeName types.NodeName,
|
||||
timeout time.Duration,
|
||||
shouldExist bool) error {
|
||||
logStr := "to contain"
|
||||
@ -708,7 +708,7 @@ func waitForPDInVolumesInUse(
|
||||
"Waiting for node %s's VolumesInUse Status %s PD %q",
|
||||
nodeName, logStr, diskName)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) {
|
||||
nodeObj, err := nodeClient.Get(nodeName)
|
||||
nodeObj, err := nodeClient.Get(string(nodeName))
|
||||
if err != nil || nodeObj == nil {
|
||||
framework.Logf(
|
||||
"Failed to fetch node object %q from API server. err=%v",