Merge pull request #49164 from vmware/vSphereCloudProviderCodeRefactoring

Automatic merge from submit-queue

VSphere cloud provider code refactoring

This PR tracks the vSphere Cloud Provider code refactoring, which includes the following changes.
- VCLib Package - A framework used by the vSphere cloud provider for managing vSphere entities (see the usage sketch after this list). The VCLib package mainly does the following:
  - Volume management on datastore (Create/Delete)
  - Volume management on Virtual Machines (Attach/Detach)
  - Storage Policy Management
- vSphere Cloud Provider changes to implement the cloud provider interfaces by calling into VCLib package.
- Modifications to e2e tests to accommodate the latest design changes.
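
For orientation, a minimal, hypothetical sketch of how a caller drives the new packages end to end: connect, resolve the datacenter and datastore, then create a volume through VCLib. The credentials, datacenter, datastore, and path names are placeholders, and VerifyVolumeOptions may impose further requirements on the option fields.

```go
package main

import (
	"context"
	"log"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Placeholder credentials; the cloud provider reads these from vsphere.conf.
	conn := &vclib.VSphereConnection{
		Username: "administrator@vsphere.local",
		Password: "secret",
		Hostname: "vc.example.com",
		Port:     "443",
		Insecure: true,
	}
	if err := conn.Connect(ctx); err != nil {
		log.Fatalf("Failed to connect to vCenter: %v", err)
	}
	dc, err := vclib.GetDatacenter(ctx, conn, "Datacenter")
	if err != nil {
		log.Fatalf("Failed to get datacenter: %v", err)
	}
	ds, err := dc.GetDatastoreByName(ctx, "sharedVmfs-0")
	if err != nil {
		log.Fatalf("Failed to get datastore: %v", err)
	}
	// Create a 1GiB thin-provisioned volume on the datastore.
	disk := diskmanagers.VirtualDisk{
		DiskPath: "[sharedVmfs-0] kubevols/demo.vmdk",
		VolumeOptions: &vclib.VolumeOptions{
			CapacityKB: 1024 * 1024,
			DiskFormat: vclib.ThinDiskType,
			Name:       "demo",
		},
	}
	if err := disk.Create(ctx, ds); err != nil {
		log.Fatalf("Failed to create volume: %v", err)
	}
}
```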

@divyenpatel @rohitjogvmw @luomiao 

```release-note
vSphere cloud provider: vSphere cloud provider code refactoring
```
Authored by Kubernetes Submit Queue on 2017-08-09 18:18:58 -07:00; committed by GitHub.
31 changed files with 2309 additions and 1791 deletions


@@ -12,32 +12,24 @@ go_library(
name = "go_default_library",
srcs = [
"vsphere.go",
"vsphere_metrics.go",
"vsphere_util.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1/helper:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/vmware/govmomi:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/pbm:go_default_library",
"//vendor/github.com/vmware/govmomi/pbm/types:go_default_library",
"//vendor/github.com/vmware/govmomi/property:go_default_library",
"//vendor/github.com/vmware/govmomi/session:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
],
)
@@ -48,6 +40,7 @@ go_test(
tags = ["automanaged"],
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
@@ -63,6 +56,9 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//pkg/cloudprovider/providers/vsphere/vclib:all-srcs",
],
tags = ["automanaged"],
)


@@ -0,0 +1,59 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"connection.go",
"constants.go",
"custom_errors.go",
"datacenter.go",
"datastore.go",
"folder.go",
"pbm.go",
"utils.go",
"virtualmachine.go",
"vmoptions.go",
"volumeoptions.go",
"vsphere_metrics.go",
],
tags = ["automanaged"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/vmware/govmomi:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/pbm:go_default_library",
"//vendor/github.com/vmware/govmomi/pbm/types:go_default_library",
"//vendor/github.com/vmware/govmomi/property:go_default_library",
"//vendor/github.com/vmware/govmomi/session:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:all-srcs",
],
tags = ["automanaged"],
)


@@ -0,0 +1,99 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"fmt"
neturl "net/url"
"sync"
"github.com/golang/glog"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/session"
"github.com/vmware/govmomi/vim25"
"golang.org/x/net/context"
)
// VSphereConnection contains information for connecting to vCenter
type VSphereConnection struct {
GoVmomiClient *govmomi.Client
Username string
Password string
Hostname string
Port string
Insecure bool
RoundTripperCount uint
}
var (
clientLock sync.Mutex
)
// Connect makes a connection to vCenter and sets VSphereConnection.GoVmomiClient.
// If connection.GoVmomiClient is already set, it validates the existing user session.
// If the user session is not valid, connection.GoVmomiClient is replaced with a new client.
func (connection *VSphereConnection) Connect(ctx context.Context) error {
var err error
clientLock.Lock()
defer clientLock.Unlock()
if connection.GoVmomiClient == nil {
connection.GoVmomiClient, err = connection.NewClient(ctx)
if err != nil {
glog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
}
m := session.NewManager(connection.GoVmomiClient.Client)
userSession, err := m.UserSession(ctx)
if err != nil {
glog.Errorf("Error while obtaining user session. err: %+v", err)
return err
}
if userSession != nil {
return nil
}
glog.Warningf("Creating new client session since the existing session is not valid or not authenticated")
connection.GoVmomiClient.Logout(ctx)
connection.GoVmomiClient, err = connection.NewClient(ctx)
if err != nil {
glog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
}
// NewClient creates a new govmomi client for the VSphereConnection obj
func (connection *VSphereConnection) NewClient(ctx context.Context) (*govmomi.Client, error) {
url, err := neturl.Parse(fmt.Sprintf("https://%s:%s/sdk", connection.Hostname, connection.Port))
if err != nil {
glog.Errorf("Failed to parse URL: %s. err: %+v", url, err)
return nil, err
}
url.User = neturl.UserPassword(connection.Username, connection.Password)
client, err := govmomi.NewClient(ctx, url, connection.Insecure)
if err != nil {
glog.Errorf("Failed to create new client. err: %+v", err)
return nil, err
}
if connection.RoundTripperCount == 0 {
connection.RoundTripperCount = RoundTripperDefaultCount
}
client.RoundTripper = vim25.Retry(client.RoundTripper, vim25.TemporaryNetworkError(int(connection.RoundTripperCount)))
return client, nil
}
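
Connect is intended to be called before every vCenter round trip: the first call builds the govmomi client, and later calls revalidate the cached session under clientLock, transparently logging in again when the session has expired. A small sketch of that pattern (the wrapper helper and its names are hypothetical):

```go
package vsphereexample

import (
	"context"

	"github.com/vmware/govmomi"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// withConnection is a hypothetical wrapper: it ensures a live session and
// then runs the given operation against the shared govmomi client.
// Connect is safe to call repeatedly; an invalid or expired session makes
// it log out and build a fresh client under clientLock.
func withConnection(ctx context.Context, conn *vclib.VSphereConnection, op func(c *govmomi.Client) error) error {
	if err := conn.Connect(ctx); err != nil {
		return err
	}
	return op(conn.GoVmomiClient)
}
```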


@@ -0,0 +1,52 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
// Volume Constants
const (
ThinDiskType = "thin"
PreallocatedDiskType = "preallocated"
EagerZeroedThickDiskType = "eagerZeroedThick"
ZeroedThickDiskType = "zeroedThick"
)
// Controller Constants
const (
SCSIControllerLimit = 4
SCSIControllerDeviceLimit = 15
SCSIDeviceSlots = 16
SCSIReservedSlot = 7
SCSIControllerType = "scsi"
LSILogicControllerType = "lsiLogic"
BusLogicControllerType = "busLogic"
LSILogicSASControllerType = "lsiLogic-sas"
PVSCSIControllerType = "pvscsi"
)
// Other Constants
const (
LogLevel = 4
DatastoreProperty = "datastore"
ResourcePoolProperty = "resourcePool"
DatastoreInfoProperty = "info"
VirtualMachineType = "VirtualMachine"
RoundTripperDefaultCount = 3
VSANDatastoreType = "vsan"
DummyVMPrefixName = "vsphere-k8s"
ActivePowerState = "poweredOn"
)


@@ -0,0 +1,37 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import "errors"
// Error Messages
const (
FileAlreadyExistErrMsg = "File requested already exist"
NoDiskUUIDFoundErrMsg = "No disk UUID found"
NoDevicesFoundErrMsg = "No devices found"
DiskNotFoundErrMsg = "No vSphere disk ID found"
InvalidVolumeOptionsErrMsg = "VolumeOptions verification failed"
)
// Error constants
var (
ErrFileAlreadyExist = errors.New(FileAlreadyExistErrMsg)
ErrNoDiskUUIDFound = errors.New(NoDiskUUIDFoundErrMsg)
ErrNoDevicesFound = errors.New(NoDevicesFoundErrMsg)
ErrNoDiskIDFound = errors.New(DiskNotFoundErrMsg)
ErrInvalidVolumeOptions = errors.New(InvalidVolumeOptionsErrMsg)
)
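
Because these sentinels are fixed package-level values built from the message strings above, callers can branch on a failure mode with a direct equality check rather than string matching. A brief sketch (the helper is hypothetical):

```go
package vsphereexample

import (
	"context"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// ensureVolumeDir creates the volume directory and treats "already exists"
// as success by comparing against the ErrFileAlreadyExist sentinel, which
// Datastore.CreateDirectory returns for the FileAlreadyExists SOAP fault.
func ensureVolumeDir(ctx context.Context, ds *vclib.Datastore, path string) error {
	err := ds.CreateDirectory(ctx, path, false)
	if err != nil && err != vclib.ErrFileAlreadyExist {
		return err
	}
	return nil
}
```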


@@ -0,0 +1,164 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"errors"
"fmt"
"strings"
"github.com/golang/glog"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
)
// Datacenter extends the govmomi Datacenter object
type Datacenter struct {
*object.Datacenter
}
// GetDatacenter returns the Datacenter object for the given datacenterPath.
// If the datacenter is located in a folder, provide the full path to the datacenter; otherwise just the datacenter name.
func GetDatacenter(ctx context.Context, connection *VSphereConnection, datacenterPath string) (*Datacenter, error) {
finder := find.NewFinder(connection.GoVmomiClient.Client, true)
datacenter, err := finder.Datacenter(ctx, datacenterPath)
if err != nil {
glog.Errorf("Failed to find the datacenter: %s. err: %+v", datacenterPath, err)
return nil, err
}
dc := Datacenter{datacenter}
return &dc, nil
}
// GetVMByUUID gets the VM object from the given vmUUID
func (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualMachine, error) {
s := object.NewSearchIndex(dc.Client())
vmUUID = strings.ToLower(strings.TrimSpace(vmUUID))
svm, err := s.FindByUuid(ctx, dc.Datacenter, vmUUID, true, nil)
if err != nil {
glog.Errorf("Failed to find VM by UUID. VM UUID: %s, err: %+v", vmUUID, err)
return nil, err
}
if svm == nil {
glog.Errorf("Unable to find VM by UUID. VM UUID: %s", vmUUID)
return nil, fmt.Errorf("Failed to find VM by UUID: %s", vmUUID)
}
virtualMachine := VirtualMachine{object.NewVirtualMachine(dc.Client(), svm.Reference()), dc}
return &virtualMachine, nil
}
// GetVMByPath gets the VM object from the given vmPath
// vmPath should be the full path to VM and not just the name
func (dc *Datacenter) GetVMByPath(ctx context.Context, vmPath string) (*VirtualMachine, error) {
finder := getFinder(dc)
vm, err := finder.VirtualMachine(ctx, vmPath)
if err != nil {
glog.Errorf("Failed to find VM by Path. VM Path: %s, err: %+v", vmPath, err)
return nil, err
}
virtualMachine := VirtualMachine{vm, dc}
return &virtualMachine, nil
}
// GetDatastoreByPath gets the Datastore object from the given vmDiskPath
func (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) (*Datastore, error) {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath)
return nil, errors.New("Failed to parse vmDiskPath")
}
finder := getFinder(dc)
ds, err := finder.Datastore(ctx, datastorePathObj.Datastore)
if err != nil {
glog.Errorf("Failed while searching for datastore: %s. err: %+v", datastorePathObj.Datastore, err)
return nil, err
}
datastore := Datastore{ds, dc}
return &datastore, nil
}
// GetDatastoreByName gets the Datastore object for the given datastore name
func (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Datastore, error) {
finder := getFinder(dc)
ds, err := finder.Datastore(ctx, name)
if err != nil {
glog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err)
return nil, err
}
datastore := Datastore{ds, dc}
return &datastore, nil
}
// GetFolderByPath gets the Folder Object from the given folder path
// folderPath should be the full path to folder
func (dc *Datacenter) GetFolderByPath(ctx context.Context, folderPath string) (*Folder, error) {
finder := getFinder(dc)
vmFolder, err := finder.Folder(ctx, folderPath)
if err != nil {
glog.Errorf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
return nil, err
}
folder := Folder{vmFolder, dc}
return &folder, nil
}
// GetVMMoList gets the VM Managed Objects with the given properties from the VM object
func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachine, properties []string) ([]mo.VirtualMachine, error) {
var vmMoList []mo.VirtualMachine
var vmRefs []types.ManagedObjectReference
if len(vmObjList) < 1 {
glog.Errorf("VirtualMachine Object list is empty")
return nil, fmt.Errorf("VirtualMachine Object list is empty")
}
for _, vmObj := range vmObjList {
vmRefs = append(vmRefs, vmObj.Reference())
}
pc := property.DefaultCollector(dc.Client())
err := pc.Retrieve(ctx, vmRefs, properties, &vmMoList)
if err != nil {
glog.Errorf("Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v", vmObjList, properties, err)
return nil, err
}
return vmMoList, nil
}
// GetDatastoreMoList gets the Datastore Managed Objects with the given properties from the datastore objects
func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datastore, properties []string) ([]mo.Datastore, error) {
var dsMoList []mo.Datastore
var dsRefs []types.ManagedObjectReference
if len(dsObjList) < 1 {
glog.Errorf("Datastore Object list is empty")
return nil, fmt.Errorf("Datastore Object list is empty")
}
for _, dsObj := range dsObjList {
dsRefs = append(dsRefs, dsObj.Reference())
}
pc := property.DefaultCollector(dc.Client())
err := pc.Retrieve(ctx, dsRefs, properties, &dsMoList)
if err != nil {
glog.Errorf("Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v", dsObjList, properties, err)
return nil, err
}
return dsMoList, nil
}
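
The Datacenter helpers pair object lookup with batched property retrieval. A hypothetical sketch that resolves a node VM by UUID and reads just its runtime summary through GetVMMoList:

```go
package vsphereexample

import (
	"context"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// nodePowerState resolves a VM by BIOS UUID and fetches only the "summary"
// property; GetVMMoList batches the property-collector call, so a list of
// VMs costs one round trip instead of one per VM.
func nodePowerState(ctx context.Context, dc *vclib.Datacenter, vmUUID string) (string, error) {
	vm, err := dc.GetVMByUUID(ctx, vmUUID)
	if err != nil {
		return "", err
	}
	vmMoList, err := dc.GetVMMoList(ctx, []*vclib.VirtualMachine{vm}, []string{"summary"})
	if err != nil {
		return "", err
	}
	return string(vmMoList[0].Summary.Runtime.PowerState), nil
}
```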


@@ -0,0 +1,75 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
)
// Datastore extends the govmomi Datastore object
type Datastore struct {
*object.Datastore
Datacenter *Datacenter
}
// CreateDirectory creates the directory at location specified by directoryPath.
// If the intermediate level folders do not exist, and the parameter createParents is true, all the non-existent folders are created.
// directoryPath must be in the format "[vsanDatastore] kubevols"
func (ds *Datastore) CreateDirectory(ctx context.Context, directoryPath string, createParents bool) error {
fileManager := object.NewFileManager(ds.Client())
err := fileManager.MakeDirectory(ctx, directoryPath, ds.Datacenter.Datacenter, createParents)
if err != nil {
if soap.IsSoapFault(err) {
soapFault := soap.ToSoapFault(err)
if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {
return ErrFileAlreadyExist
}
}
return err
}
glog.V(LogLevel).Infof("Created dir with path as %+q", directoryPath)
return nil
}
// GetType returns the type of datastore
func (ds *Datastore) GetType(ctx context.Context) (string, error) {
var dsMo mo.Datastore
pc := property.DefaultCollector(ds.Client())
err := pc.RetrieveOne(ctx, ds.Datastore.Reference(), []string{"summary"}, &dsMo)
if err != nil {
glog.Errorf("Failed to retrieve datastore summary property. err: %v", err)
return "", err
}
return dsMo.Summary.Type, nil
}
// IsCompatibleWithStoragePolicy returns true if the datastore is compatible with the given storage policy, else returns false.
// For a non-compatible datastore, the fault message is also returned.
func (ds *Datastore) IsCompatibleWithStoragePolicy(ctx context.Context, storagePolicyID string) (bool, string, error) {
pbmClient, err := NewPbmClient(ctx, ds.Client())
if err != nil {
glog.Errorf("Failed to get new PbmClient Object. err: %v", err)
return false, "", err
}
return pbmClient.IsDatastoreCompatible(ctx, storagePolicyID, ds)
}
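
GetType and IsCompatibleWithStoragePolicy are typically combined when validating a user-selected datastore. A hedged sketch, assuming the policy ID was already resolved:

```go
package vsphereexample

import (
	"context"
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// validateVSANDatastore is a hypothetical check: the datastore must be vSAN
// and must satisfy the SPBM storage policy, otherwise the fault text is surfaced.
func validateVSANDatastore(ctx context.Context, ds *vclib.Datastore, policyID string) error {
	dsType, err := ds.GetType(ctx)
	if err != nil {
		return err
	}
	if dsType != vclib.VSANDatastoreType {
		return fmt.Errorf("datastore %q is not a vSAN datastore", ds.Name())
	}
	compatible, faultMessage, err := ds.IsCompatibleWithStoragePolicy(ctx, policyID)
	if err != nil {
		return err
	}
	if !compatible {
		return fmt.Errorf("datastore %q is not compatible with the policy: %s", ds.Name(), faultMessage)
	}
	return nil
}
```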


@@ -0,0 +1,38 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"vdm.go",
"virtualdisk.go",
"vmdm.go",
],
tags = ["automanaged"],
deps = [
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -0,0 +1,91 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diskmanagers
import (
"time"
"golang.org/x/net/context"
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
// virtualDiskManager implements the VirtualDiskProvider interface for creating and deleting volumes using VirtualDiskManager
type virtualDiskManager struct {
diskPath string
volumeOptions *vclib.VolumeOptions
}
// Create implements the VirtualDiskProvider Create interface.
// Contains the implementation of VirtualDiskManager-based provisioning.
func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (err error) {
if diskManager.volumeOptions.SCSIControllerType == "" {
diskManager.volumeOptions.SCSIControllerType = vclib.LSILogicControllerType
}
// Look up the canonical disk format for the requested format
diskFormat := vclib.DiskFormatValidType[diskManager.volumeOptions.DiskFormat]
// Create a virtual disk manager
vdm := object.NewVirtualDiskManager(datastore.Client())
// Create specification for new virtual disk
vmDiskSpec := &types.FileBackedVirtualDiskSpec{
VirtualDiskSpec: types.VirtualDiskSpec{
AdapterType: diskManager.volumeOptions.SCSIControllerType,
DiskType: diskFormat,
},
CapacityKb: int64(diskManager.volumeOptions.CapacityKB),
}
requestTime := time.Now()
// Create virtual disk
task, err := vdm.CreateVirtualDisk(ctx, diskManager.diskPath, datastore.Datacenter.Datacenter, vmDiskSpec)
if err != nil {
vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err)
glog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err)
return err
}
err = task.Wait(ctx)
vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err)
if err != nil {
glog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err)
return err
}
return nil
}
// Delete implements Disk's Delete interface
func (diskManager virtualDiskManager) Delete(ctx context.Context, datastore *vclib.Datastore) error {
// Create a virtual disk manager
virtualDiskManager := object.NewVirtualDiskManager(datastore.Client())
diskPath := vclib.RemoveClusterFromVDiskPath(diskManager.diskPath)
requestTime := time.Now()
// Delete virtual disk
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datastore.Datacenter.Datacenter)
if err != nil {
glog.Errorf("Failed to delete virtual disk. err: %v", err)
vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err)
return err
}
err = task.Wait(ctx)
vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err)
if err != nil {
glog.Errorf("Failed to delete virtual disk. err: %v", err)
return err
}
return nil
}


@@ -0,0 +1,80 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diskmanagers
import (
"fmt"
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
// VirtualDisk holds the disk path and the options used for disk management
type VirtualDisk struct {
DiskPath string
VolumeOptions *vclib.VolumeOptions
VMOptions *vclib.VMOptions
}
// VirtualDisk Operations Const
const (
VirtualDiskCreateOperation = "Create"
VirtualDiskDeleteOperation = "Delete"
)
// VirtualDiskProvider defines the interface for creating and deleting disks
type VirtualDiskProvider interface {
Create(ctx context.Context, datastore *vclib.Datastore) error
Delete(ctx context.Context, datastore *vclib.Datastore) error
}
// getDiskManager returns vmDiskManager or virtualDiskManager based on the given VolumeOptions
func getDiskManager(disk *VirtualDisk, diskOperation string) VirtualDiskProvider {
var diskProvider VirtualDiskProvider
switch diskOperation {
case VirtualDiskDeleteOperation:
diskProvider = virtualDiskManager{disk.DiskPath, disk.VolumeOptions}
case VirtualDiskCreateOperation:
if disk.VolumeOptions.StoragePolicyName != "" || disk.VolumeOptions.VSANStorageProfileData != "" || disk.VolumeOptions.StoragePolicyID != "" {
diskProvider = vmDiskManager{disk.DiskPath, disk.VolumeOptions, disk.VMOptions}
} else {
diskProvider = virtualDiskManager{disk.DiskPath, disk.VolumeOptions}
}
}
return diskProvider
}
// Create gets appropriate disk manager and calls respective create method
func (virtualDisk *VirtualDisk) Create(ctx context.Context, datastore *vclib.Datastore) error {
if virtualDisk.VolumeOptions.DiskFormat == "" {
virtualDisk.VolumeOptions.DiskFormat = vclib.ThinDiskType
}
if !virtualDisk.VolumeOptions.VerifyVolumeOptions() {
glog.Error("VolumeOptions verification failed. volumeOptions: ", virtualDisk.VolumeOptions)
return vclib.ErrInvalidVolumeOptions
}
if virtualDisk.VolumeOptions.StoragePolicyID != "" && virtualDisk.VolumeOptions.StoragePolicyName != "" {
return fmt.Errorf("Storage Policy ID and Storage Policy Name both set, Please set only one parameter")
}
return getDiskManager(virtualDisk, VirtualDiskCreateOperation).Create(ctx, datastore)
}
// Delete gets appropriate disk manager and calls respective delete method
func (virtualDisk *VirtualDisk) Delete(ctx context.Context, datastore *vclib.Datastore) error {
return getDiskManager(virtualDisk, VirtualDiskDeleteOperation).Delete(ctx, datastore)
}
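
The getDiskManager dispatch means callers construct a VirtualDisk and never pick an implementation themselves: any of StoragePolicyName, StoragePolicyID, or VSANStorageProfileData selects the dummy-VM (vmDiskManager) path, which also needs VMOptions; otherwise the plain VirtualDiskManager path runs. A sketch of the two shapes (all field values are illustrative):

```go
package vsphereexample

import (
	"context"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
)

// createVolumes shows both provisioning shapes dispatched by getDiskManager.
func createVolumes(ctx context.Context, ds *vclib.Datastore, vmOpts *vclib.VMOptions) error {
	// No policy fields set: virtualDiskManager (plain CreateVirtualDisk) runs.
	plain := diskmanagers.VirtualDisk{
		DiskPath:      "[sharedVmfs-0] kubevols/plain.vmdk",
		VolumeOptions: &vclib.VolumeOptions{CapacityKB: 1024 * 1024, Name: "plain"},
	}
	if err := plain.Create(ctx, ds); err != nil {
		return err
	}
	// StoragePolicyName set: vmDiskManager runs, so VMOptions must carry the
	// folder and resource pool used for the transient dummy VM.
	policy := diskmanagers.VirtualDisk{
		DiskPath: "[vsanDatastore] kubevols/policy.vmdk",
		VolumeOptions: &vclib.VolumeOptions{
			CapacityKB:        1024 * 1024,
			Name:              "policy",
			StoragePolicyName: "gold",
		},
		VMOptions: vmOpts,
	}
	return policy.Create(ctx, ds)
}
```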


@@ -0,0 +1,246 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diskmanagers
import (
"fmt"
"strings"
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
// vmDiskManager implements the VirtualDiskProvider interface for creating volumes using the Virtual Machine reconfigure approach
type vmDiskManager struct {
diskPath string
volumeOptions *vclib.VolumeOptions
vmOptions *vclib.VMOptions
}
// Create implements the VirtualDiskProvider Create interface.
// Contains the implementation of VM-based provisioning to provision a disk with an SPBM policy or VSANStorageProfileData.
func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (err error) {
if vmdisk.volumeOptions.SCSIControllerType == "" {
vmdisk.volumeOptions.SCSIControllerType = vclib.PVSCSIControllerType
}
pbmClient, err := vclib.NewPbmClient(ctx, datastore.Client())
if err != nil {
glog.Errorf("Error occurred while creating new pbmClient, err: %+v", err)
return err
}
if vmdisk.volumeOptions.StoragePolicyID == "" && vmdisk.volumeOptions.StoragePolicyName != "" {
vmdisk.volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, vmdisk.volumeOptions.StoragePolicyName)
if err != nil {
glog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err)
return err
}
}
if vmdisk.volumeOptions.StoragePolicyID != "" {
compatible, faultMessage, err := datastore.IsCompatibleWithStoragePolicy(ctx, vmdisk.volumeOptions.StoragePolicyID)
if err != nil {
glog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err)
return err
}
if !compatible {
glog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName)
return fmt.Errorf("User specified datastore is not compatible with the storagePolicy: %q. Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage)
}
}
storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{}
// If the PBM storage policy ID is present, set the storage spec profile ID;
// otherwise, set the raw VSAN policy string.
if vmdisk.volumeOptions.StoragePolicyID != "" {
storageProfileSpec.ProfileId = vmdisk.volumeOptions.StoragePolicyID
} else if vmdisk.volumeOptions.VSANStorageProfileData != "" {
// Check Datastore type - VSANStorageProfileData is only applicable to vSAN Datastore
dsType, err := datastore.GetType(ctx)
if err != nil {
return err
}
if dsType != vclib.VSANDatastoreType {
glog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name())
return fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+
" The policy parameters will work only with VSAN Datastore."+
" So, please specify a valid VSAN datastore in Storage class definition.", datastore.Name())
}
storageProfileSpec.ProfileId = ""
storageProfileSpec.ProfileData = &types.VirtualMachineProfileRawData{
ExtensionKey: "com.vmware.vim.sps",
ObjectData: vmdisk.volumeOptions.VSANStorageProfileData,
}
} else {
glog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
return fmt.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
}
var dummyVM *vclib.VirtualMachine
// Check if the VM already exists in the folder.
// If it is already present, use it; otherwise create a new dummy VM.
dummyVMFullName := vclib.DummyVMPrefixName + "-" + vmdisk.volumeOptions.Name
dummyVM, err = datastore.Datacenter.GetVMByPath(ctx, vmdisk.vmOptions.VMFolder.InventoryPath+"/"+dummyVMFullName)
if err != nil {
// Create a dummy VM
glog.V(1).Info("Creating Dummy VM: %q", dummyVMFullName)
dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName)
if err != nil {
glog.Errorf("Failed to create Dummy VM. err: %v", err)
return err
}
}
// Reconfigure the VM to attach the disk with the VSAN policy configured
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
disk, _, err := dummyVM.CreateDiskSpec(ctx, vmdisk.diskPath, datastore, vmdisk.volumeOptions)
if err != nil {
glog.Errorf("Failed to create Disk Spec. err: %v", err)
return err
}
deviceConfigSpec := &types.VirtualDeviceConfigSpec{
Device: disk,
Operation: types.VirtualDeviceConfigSpecOperationAdd,
FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate,
}
deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, storageProfileSpec)
virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec)
fileAlreadyExist := false
task, err := dummyVM.Reconfigure(ctx, virtualMachineConfigSpec)
if err != nil {
glog.Errorf("Failed to reconfigure the dummy VM: %q. err: %+v", dummyVMFullName, err)
return err
}
err = task.Wait(ctx)
if err != nil {
fileAlreadyExist = isAlreadyExists(vmdisk.diskPath, err)
if fileAlreadyExist {
//Skip error and continue to detach the disk as the disk was already created on the datastore.
glog.V(vclib.LogLevel).Info("File: %v already exists", vmdisk.diskPath)
} else {
glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err)
return err
}
}
// Detach the disk from the dummy VM.
err = dummyVM.DetachDisk(ctx, vmdisk.diskPath)
if err != nil {
if vclib.DiskNotFoundErrMsg == err.Error() && fileAlreadyExist {
// Skip error if disk was already detached from the dummy VM but still present on the datastore.
glog.V(vclib.LogLevel).Info("File: %v is already detached", vmdisk.diskPath)
} else {
glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err)
return err
}
}
// Delete the dummy VM
err = dummyVM.DeleteVM(ctx)
if err != nil {
glog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err)
}
return nil
}
func (vmdisk vmDiskManager) Delete(ctx context.Context, datastore *vclib.Datastore) error {
return fmt.Errorf("vmDiskManager.Delete is not supported")
}
// createDummyVM creates a dummy VM at the specified location with the given name.
func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib.Datacenter, vmName string) (*vclib.VirtualMachine, error) {
// Create a virtual machine config spec with 1 SCSI adapter.
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{
Name: vmName,
Files: &types.VirtualMachineFileInfo{
VmPathName: "[" + vmdisk.volumeOptions.Datastore + "]",
},
NumCPUs: 1,
MemoryMB: 4,
DeviceChange: []types.BaseVirtualDeviceConfigSpec{
&types.VirtualDeviceConfigSpec{
Operation: types.VirtualDeviceConfigSpecOperationAdd,
Device: &types.ParaVirtualSCSIController{
VirtualSCSIController: types.VirtualSCSIController{
SharedBus: types.VirtualSCSISharingNoSharing,
VirtualController: types.VirtualController{
BusNumber: 0,
VirtualDevice: types.VirtualDevice{
Key: 1000,
},
},
},
},
},
},
}
task, err := vmdisk.vmOptions.VMFolder.CreateVM(ctx, virtualMachineConfigSpec, vmdisk.vmOptions.VMResourcePool, nil)
if err != nil {
glog.Errorf("Failed to create VM. err: %+v", err)
return nil, err
}
dummyVMTaskInfo, err := task.WaitForResult(ctx, nil)
if err != nil {
glog.Errorf("Error occurred while waiting for create VM task result. err: %+v", err)
return nil, err
}
vmRef := dummyVMTaskInfo.Result.(object.Reference)
dummyVM := object.NewVirtualMachine(datacenter.Client(), vmRef.Reference())
return &vclib.VirtualMachine{VirtualMachine: dummyVM, Datacenter: datacenter}, nil
}
// CleanUpDummyVMs deletes stale dummy VMs
func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datacenter) error {
vmList, err := folder.GetVirtualMachines(ctx)
if err != nil {
glog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err)
return err
}
if len(vmList) == 0 {
glog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
return fmt.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
}
var dummyVMList []*vclib.VirtualMachine
// Loop through the VMs in the Kubernetes cluster to find dummy VMs
for _, vm := range vmList {
vmName, err := vm.ObjectName(ctx)
if err != nil {
glog.V(4).Infof("Unable to get name from VM with err: %+v", err)
continue
}
if strings.HasPrefix(vmName, vclib.DummyVMPrefixName) {
vmObj := vclib.VirtualMachine{VirtualMachine: object.NewVirtualMachine(dc.Client(), vm.Reference()), Datacenter: dc}
dummyVMList = append(dummyVMList, &vmObj)
}
}
for _, vm := range dummyVMList {
err = vm.DeleteVM(ctx)
if err != nil {
glog.V(4).Infof("Unable to delete dummy VM with err: %+v", err)
continue
}
}
return nil
}
func isAlreadyExists(path string, err error) bool {
errorMessage := fmt.Sprintf("Cannot complete the operation because the file or folder %s already exists", path)
return errorMessage == err.Error()
}
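
Since a failed detach or delete can strand a dummy VM on the datastore, CleanUpDummyVMs exists to sweep them out of the cluster's VM folder. A hypothetical periodic invocation (the interval and folder path are placeholders):

```go
package vsphereexample

import (
	"context"
	"log"
	"time"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
)

// sweepDummyVMs periodically deletes stale vsphere-k8s dummy VMs from the
// cluster's VM folder; folderPath is a placeholder inventory path.
func sweepDummyVMs(ctx context.Context, dc *vclib.Datacenter, folderPath string) {
	for range time.Tick(5 * time.Minute) {
		folder, err := dc.GetFolderByPath(ctx, folderPath)
		if err != nil {
			log.Printf("folder lookup failed: %v", err)
			continue
		}
		if err := diskmanagers.CleanUpDummyVMs(ctx, folder, dc); err != nil {
			log.Printf("dummy VM cleanup failed: %v", err)
		}
	}
}
```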


@@ -0,0 +1,46 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"golang.org/x/net/context"
)
// Folder extends the govmomi Folder object
type Folder struct {
*object.Folder
Datacenter *Datacenter
}
// GetVirtualMachines returns the list of VirtualMachines inside a folder.
func (folder *Folder) GetVirtualMachines(ctx context.Context) ([]*VirtualMachine, error) {
vmFolders, err := folder.Children(ctx)
if err != nil {
glog.Errorf("Failed to get children from Folder: %s. err: %+v", folder.InventoryPath, err)
return nil, err
}
var vmObjList []*VirtualMachine
for _, vmFolder := range vmFolders {
if vmFolder.Reference().Type == VirtualMachineType {
vmObj := VirtualMachine{object.NewVirtualMachine(folder.Client(), vmFolder.Reference()), folder.Datacenter}
vmObjList = append(vmObjList, &vmObj)
}
}
return vmObjList, nil
}


@@ -0,0 +1,169 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"fmt"
"github.com/golang/glog"
"github.com/vmware/govmomi/pbm"
"golang.org/x/net/context"
pbmtypes "github.com/vmware/govmomi/pbm/types"
"github.com/vmware/govmomi/vim25"
)
// PbmClient extends the govmomi pbm client and provides functions to get the list of datastores compatible with a given storage policy
type PbmClient struct {
*pbm.Client
}
// NewPbmClient returns a new PBM Client object
func NewPbmClient(ctx context.Context, client *vim25.Client) (*PbmClient, error) {
pbmClient, err := pbm.NewClient(ctx, client)
if err != nil {
glog.Errorf("Failed to create new Pbm Client. err: %+v", err)
return nil, err
}
return &PbmClient{pbmClient}, nil
}
// IsDatastoreCompatible checks if the datastore is compatible with the given storage policy ID.
// If the datastore is not compatible with the policy, a fault message with the datastore name is returned.
func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePolicyID string, datastore *Datastore) (bool, string, error) {
faultMessage := ""
placementHub := pbmtypes.PbmPlacementHub{
HubType: datastore.Reference().Type,
HubId: datastore.Reference().Value,
}
hubs := []pbmtypes.PbmPlacementHub{placementHub}
req := []pbmtypes.BasePbmPlacementRequirement{
&pbmtypes.PbmPlacementCapabilityProfileRequirement{
ProfileId: pbmtypes.PbmProfileId{
UniqueId: storagePolicyID,
},
},
}
compatibilityResult, err := pbmClient.CheckRequirements(ctx, hubs, nil, req)
if err != nil {
glog.Errorf("Error occurred for CheckRequirements call. err %+v", err)
return false, "", err
}
if len(compatibilityResult) > 0 {
compatibleHubs := compatibilityResult.CompatibleDatastores()
if len(compatibleHubs) > 0 {
return true, "", nil
}
dsName, err := datastore.ObjectName(ctx)
if err != nil {
glog.Errorf("Failed to get datastore ObjectName")
return false, "", err
}
if compatibilityResult[0].Error[0].LocalizedMessage == "" {
faultMessage = "Datastore: " + dsName + " is not compatible with the storage policy."
} else {
faultMessage = "Datastore: " + dsName + " is not compatible with the storage policy. LocalizedMessage: " + compatibilityResult[0].Error[0].LocalizedMessage + "\n"
}
return false, faultMessage, nil
}
return false, "", fmt.Errorf("compatibilityResult is nil or empty")
}
// GetCompatibleDatastores filters and returns the list of datastores compatible with the given storage policy ID.
// For non-compatible datastores, a fault message with the datastore name is also returned.
func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, storagePolicyID string, datastores []*Datastore) ([]*Datastore, string, error) {
var (
dsMorNameMap = getDsMorNameMap(ctx, datastores)
localizedMessagesForNotCompatibleDatastores = ""
)
compatibilityResult, err := pbmClient.GetPlacementCompatibilityResult(ctx, storagePolicyID, datastores)
if err != nil {
glog.Errorf("Error occurred while retrieving placement compatibility result for datastores: %+v with storagePolicyID: %s. err: %+v", datastores, storagePolicyID, err)
return nil, "", err
}
compatibleHubs := compatibilityResult.CompatibleDatastores()
var compatibleDatastoreList []*Datastore
for _, hub := range compatibleHubs {
compatibleDatastoreList = append(compatibleDatastoreList, getDatastoreFromPlacementHub(datastores, hub))
}
for _, res := range compatibilityResult {
for _, err := range res.Error {
dsName := dsMorNameMap[res.Hub.HubId]
localizedMessage := ""
if err.LocalizedMessage != "" {
localizedMessage = "Datastore: " + dsName + " not compatible with the storage policy. LocalizedMessage: " + err.LocalizedMessage + "\n"
} else {
localizedMessage = "Datastore: " + dsName + " not compatible with the storage policy. \n"
}
localizedMessagesForNotCompatibleDatastores += localizedMessage
}
}
// Return an error if there are no compatible datastores.
if len(compatibleHubs) < 1 {
glog.Errorf("No compatible datastores found that satisfy the storage policy requirements: %s", storagePolicyID)
return nil, localizedMessagesForNotCompatibleDatastores, fmt.Errorf("No compatible datastores found that satisfy the storage policy requirements")
}
return compatibleDatastoreList, localizedMessagesForNotCompatibleDatastores, nil
}
// GetPlacementCompatibilityResult gets placement compatibility result based on storage policy requirements.
func (pbmClient *PbmClient) GetPlacementCompatibilityResult(ctx context.Context, storagePolicyID string, datastore []*Datastore) (pbm.PlacementCompatibilityResult, error) {
var hubs []pbmtypes.PbmPlacementHub
for _, ds := range datastore {
hubs = append(hubs, pbmtypes.PbmPlacementHub{
HubType: ds.Reference().Type,
HubId: ds.Reference().Value,
})
}
req := []pbmtypes.BasePbmPlacementRequirement{
&pbmtypes.PbmPlacementCapabilityProfileRequirement{
ProfileId: pbmtypes.PbmProfileId{
UniqueId: storagePolicyID,
},
},
}
res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req)
if err != nil {
glog.Errorf("Error occurred for CheckRequirements call. err: %+v", err)
return nil, err
}
return res, nil
}
// getDatastoreFromPlacementHub returns the matching datastore associated with the given pbmPlacementHub
func getDatastoreFromPlacementHub(datastore []*Datastore, pbmPlacementHub pbmtypes.PbmPlacementHub) *Datastore {
for _, ds := range datastore {
if ds.Reference().Type == pbmPlacementHub.HubType && ds.Reference().Value == pbmPlacementHub.HubId {
return ds
}
}
return nil
}
// getDsMorNameMap returns map of ds Mor and Datastore Object Name
func getDsMorNameMap(ctx context.Context, datastores []*Datastore) map[string]string {
dsMorNameMap := make(map[string]string)
for _, ds := range datastores {
dsObjectName, err := ds.ObjectName(ctx)
if err == nil {
dsMorNameMap[ds.Reference().Value] = dsObjectName
} else {
glog.Errorf("Error occurred while getting datastore object name. err: %+v", err)
}
}
return dsMorNameMap
}
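
A hedged sketch of how a provisioner might use the PBM client: resolve the policy name to an ID (ProfileIDByName comes from the embedded govmomi pbm client), then keep only the datastores the policy accepts, surfacing the accumulated fault text when nothing matches. The wrapper itself is hypothetical:

```go
package vsphereexample

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/vim25"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// datastoresForPolicy filters candidates down to those compatible with the
// named storage policy; names and flow here are illustrative.
func datastoresForPolicy(ctx context.Context, client *vim25.Client, policyName string, candidates []*vclib.Datastore) ([]*vclib.Datastore, error) {
	pbmClient, err := vclib.NewPbmClient(ctx, client)
	if err != nil {
		return nil, err
	}
	policyID, err := pbmClient.ProfileIDByName(ctx, policyName)
	if err != nil {
		return nil, err
	}
	compatible, faults, err := pbmClient.GetCompatibleDatastores(ctx, policyID, candidates)
	if err != nil {
		return nil, fmt.Errorf("no compatible datastores: %v; faults: %s", err, faults)
	}
	return compatible, nil
}
```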


@@ -0,0 +1,117 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"fmt"
"path/filepath"
"regexp"
"strings"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
)
func getFinder(dc *Datacenter) *find.Finder {
finder := find.NewFinder(dc.Client(), true)
finder.SetDatacenter(dc.Datacenter)
return finder
}
// formatVirtualDiskUUID removes any spaces and hyphens in UUID
// Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa
func formatVirtualDiskUUID(uuid string) string {
uuidWithNoSpace := strings.Replace(uuid, " ", "", -1)
uuidWithNoHyphens := strings.Replace(uuidWithNoSpace, "-", "", -1)
return strings.ToLower(uuidWithNoHyphens)
}
// getSCSIControllersOfType filters specific type of Controller device from given list of Virtual Machine Devices
func getSCSIControllersOfType(vmDevices object.VirtualDeviceList, scsiType string) []*types.VirtualController {
// get virtual scsi controllers of passed argument type
var scsiControllers []*types.VirtualController
for _, device := range vmDevices {
devType := vmDevices.Type(device)
if devType == scsiType {
if c, ok := device.(types.BaseVirtualController); ok {
scsiControllers = append(scsiControllers, c.GetVirtualController())
}
}
}
return scsiControllers
}
// getAvailableSCSIController gets an available SCSI controller from the given list of controllers, i.e. one with fewer than 15 disk devices.
func getAvailableSCSIController(scsiControllers []*types.VirtualController) *types.VirtualController {
// get SCSI controller which has space for adding more devices
for _, controller := range scsiControllers {
if len(controller.Device) < SCSIControllerDeviceLimit {
return controller
}
}
return nil
}
// getNextUnitNumber gets the next available SCSI controller unit number from given list of Controller Device List
func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) {
var takenUnitNumbers [SCSIDeviceSlots]bool
takenUnitNumbers[SCSIReservedSlot] = true
key := c.GetVirtualController().Key
for _, device := range devices {
d := device.GetVirtualDevice()
if d.ControllerKey == key {
if d.UnitNumber != nil {
takenUnitNumbers[*d.UnitNumber] = true
}
}
}
for unitNumber, takenUnitNumber := range takenUnitNumbers {
if !takenUnitNumber {
return int32(unitNumber), nil
}
}
return -1, fmt.Errorf("SCSI Controller with key=%d does not have any available slots", key)
}
// getSCSIControllers filters and return list of Controller Devices from given list of Virtual Machine Devices.
func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController {
// get all virtual scsi controllers
var scsiControllers []*types.VirtualController
for _, device := range vmDevices {
devType := vmDevices.Type(device)
switch devType {
case SCSIControllerType, strings.ToLower(LSILogicControllerType), strings.ToLower(BusLogicControllerType), PVSCSIControllerType, strings.ToLower(LSILogicSASControllerType):
if c, ok := device.(types.BaseVirtualController); ok {
scsiControllers = append(scsiControllers, c.GetVirtualController())
}
}
}
return scsiControllers
}
// RemoveClusterFromVDiskPath removes the cluster or folder path from the vDiskPath
// for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
// for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
func RemoveClusterFromVDiskPath(vDiskPath string) string {
datastore := regexp.MustCompile(`\[(.*?)\]`).FindStringSubmatch(vDiskPath)[1]
if filepath.Base(datastore) != datastore {
vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
}
return vDiskPath
}
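
A quick, runnable illustration of the path rewriting; the expected outputs are the ones given in the doc comment above:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

func main() {
	// Datastore-cluster path: the cluster prefix is stripped.
	fmt.Println(vclib.RemoveClusterFromVDiskPath("[DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"))
	// -> [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk

	// Plain datastore path: returned unchanged.
	fmt.Println(vclib.RemoveClusterFromVDiskPath("[sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"))
	// -> [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
}
```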


@@ -0,0 +1,368 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"fmt"
"path/filepath"
"time"
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
)
// VirtualMachine extends the govmomi VirtualMachine object
type VirtualMachine struct {
*object.VirtualMachine
Datacenter *Datacenter
}
// IsDiskAttached checks if disk is attached to the VM.
func (vm *VirtualMachine) IsDiskAttached(ctx context.Context, diskPath string) (bool, error) {
device, err := vm.getVirtualDeviceByPath(ctx, diskPath)
if err != nil {
return false, err
}
if device != nil {
return true, nil
}
return false, nil
}
// GetVirtualDiskPage83Data gets the virtual disk UUID by diskPath
func (vm *VirtualMachine) GetVirtualDiskPage83Data(ctx context.Context, diskPath string) (string, error) {
if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
diskPath += ".vmdk"
}
vdm := object.NewVirtualDiskManager(vm.Client())
// Returns uuid of vmdk virtual disk
diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, vm.Datacenter.Datacenter)
if err != nil {
glog.Errorf("QueryVirtualDiskUuid failed for diskPath: %q on VM: %q. err: %+v", diskPath, vm.InventoryPath, err)
return "", ErrNoDiskUUIDFound
}
diskUUID = formatVirtualDiskUUID(diskUUID)
return diskUUID, nil
}
// DeleteVM deletes the VM.
func (vm *VirtualMachine) DeleteVM(ctx context.Context) error {
destroyTask, err := vm.Destroy(ctx)
if err != nil {
glog.Errorf("Failed to delete the VM: %q. err: %+v", vm.InventoryPath, err)
return err
}
return destroyTask.Wait(ctx)
}
// AttachDisk attaches the disk at location vmDiskPath to the Virtual Machine.
// Additionally, the disk can be configured with an SPBM policy if volumeOptions.StoragePolicyID is non-empty.
func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, volumeOptions *VolumeOptions) (string, error) {
// Check if the diskControllerType is valid
if !CheckControllerSupported(volumeOptions.SCSIControllerType) {
return "", fmt.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions())
}
vmDiskPathCopy := vmDiskPath
vmDiskPath = RemoveClusterFromVDiskPath(vmDiskPath)
attached, err := vm.IsDiskAttached(ctx, vmDiskPath)
if err != nil {
glog.Errorf("Error occurred while checking if disk is attached on VM: %q. vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err)
return "", err
}
// If disk is already attached, return the disk UUID
if attached {
diskUUID, err := vm.GetVirtualDiskPage83Data(ctx, vmDiskPath)
return diskUUID, err
}
dsObj, err := vm.Datacenter.GetDatastoreByPath(ctx, vmDiskPathCopy)
if err != nil {
glog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err)
return "", err
}
// If disk is not attached, create a disk spec for disk to be attached to the VM.
disk, newSCSIController, err := vm.CreateDiskSpec(ctx, vmDiskPath, dsObj, volumeOptions)
if err != nil {
glog.Errorf("Error occurred while creating disk spec. err: %+v", err)
return "", err
}
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err)
return "", err
}
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
deviceConfigSpec := &types.VirtualDeviceConfigSpec{
Device: disk,
Operation: types.VirtualDeviceConfigSpecOperationAdd,
}
// Configure the disk with the SPBM profile only if ProfileID is not empty.
if volumeOptions.StoragePolicyID != "" {
profileSpec := &types.VirtualMachineDefinedProfileSpec{
ProfileId: volumeOptions.StoragePolicyID,
}
deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, profileSpec)
}
virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec)
requestTime := time.Now()
task, err := vm.Reconfigure(ctx, virtualMachineConfigSpec)
if err != nil {
RecordvSphereMetric(APIAttachVolume, requestTime, err)
glog.Errorf("Failed to attach the disk with storagePolicy: %q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err)
if newSCSIController != nil {
vm.deleteController(ctx, newSCSIController, vmDevices)
}
return "", err
}
err = task.Wait(ctx)
RecordvSphereMetric(APIAttachVolume, requestTime, err)
if err != nil {
glog.Errorf("Failed to attach the disk with storagePolicy: %+q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err)
if newSCSIController != nil {
vm.deleteController(ctx, newSCSIController, vmDevices)
}
return "", err
}
// Once disk is attached, get the disk UUID.
diskUUID, err := vm.GetVirtualDiskPage83Data(ctx, vmDiskPath)
if err != nil {
glog.Errorf("Error occurred while getting Disk Info from VM: %q. err: %v", vm.InventoryPath, err)
vm.DetachDisk(ctx, vmDiskPath)
if newSCSIController != nil {
vm.deleteController(ctx, newSCSIController, vmDevices)
}
return "", err
}
return diskUUID, nil
}
// DetachDisk detaches the disk specified by vmDiskPath
func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) error {
vmDiskPath = RemoveClusterFromVDiskPath(vmDiskPath)
device, err := vm.getVirtualDeviceByPath(ctx, vmDiskPath)
if err != nil {
glog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath)
return err
}
if device == nil {
glog.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath)
return fmt.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath)
}
// Detach disk from VM
requestTime := time.Now()
err = vm.RemoveDevice(ctx, true, device)
RecordvSphereMetric(APIDetachVolume, requestTime, err)
if err != nil {
glog.Errorf("Error occurred while removing disk device for VM: %q. err: %v", vm.InventoryPath, err)
return err
}
return nil
}
// GetResourcePool gets the resource pool for VM.
func (vm *VirtualMachine) GetResourcePool(ctx context.Context) (*object.ResourcePool, error) {
vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"resourcePool"})
if err != nil {
glog.Errorf("Failed to get resource pool from VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
return object.NewResourcePool(vm.Client(), vmMoList[0].ResourcePool.Reference()), nil
}
// Exists checks if the VM exists.
// Returns false if VM doesn't exist or VM is in powerOff state.
func (vm *VirtualMachine) Exists(ctx context.Context) (bool, error) {
vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary"})
if err != nil {
glog.Errorf("Failed to get VM Managed object with property summary. err: +%v", err)
return false, err
}
if vmMoList[0].Summary.Runtime.PowerState == ActivePowerState {
return true, nil
}
if !vmMoList[0].Summary.Config.Template {
glog.Warningf("VM is not in %s state", ActivePowerState)
} else {
glog.Warningf("VM is a template")
}
return false, nil
}
// GetAllAccessibleDatastores gets the list of accessible Datastores for the given Virtual Machine
func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*Datastore, error) {
host, err := vm.HostSystem(ctx)
if err != nil {
glog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
var hostSystemMo mo.HostSystem
s := object.NewSearchIndex(vm.Client())
err = s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo)
if err != nil {
glog.Errorf("Failed to retrieve datastores for host: %+v. err: %+v", host, err)
return nil, err
}
var dsObjList []*Datastore
for _, dsRef := range hostSystemMo.Datastore {
dsObjList = append(dsObjList, &Datastore{object.NewDatastore(vm.Client(), dsRef), vm.Datacenter})
}
return dsObjList, nil
}
// CreateDiskSpec creates a disk spec for the disk to be attached to the VM
func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, dsObj *Datastore, volumeOptions *VolumeOptions) (*types.VirtualDisk, types.BaseVirtualDevice, error) {
var newSCSIController types.BaseVirtualDevice
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to retrieve VM devices. err: %+v", err)
return nil, nil, err
}
// find SCSI controller of particular type from VM devices
scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType)
scsiController := getAvailableSCSIController(scsiControllersOfRequiredType)
if scsiController == nil {
newSCSIController, err = vm.createAndAttachSCSIController(ctx, volumeOptions.SCSIControllerType)
if err != nil {
glog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.InventoryPath, err)
return nil, nil, err
}
// Get VM device list
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to retrieve VM devices. err: %v", err)
return nil, nil, err
}
// verify scsi controller in virtual machine
scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType)
scsiController = getAvailableSCSIController(scsiControllersOfRequiredType)
if scsiController == nil {
glog.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
// attempt clean up of scsi controller
vm.deleteController(ctx, newSCSIController, vmDevices)
return nil, nil, fmt.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
}
}
disk := vmDevices.CreateDisk(scsiController, dsObj.Reference(), diskPath)
unitNumber, err := getNextUnitNumber(vmDevices, scsiController)
if err != nil {
glog.Errorf("Cannot attach disk to VM, unitNumber limit reached - %+v.", err)
return nil, nil, err
}
*disk.UnitNumber = unitNumber
backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)
if volumeOptions.CapacityKB != 0 {
disk.CapacityInKB = int64(volumeOptions.CapacityKB)
}
if volumeOptions.DiskFormat != "" {
diskFormat := DiskFormatValidType[volumeOptions.DiskFormat]
switch diskFormat {
case ThinDiskType:
backing.ThinProvisioned = types.NewBool(true)
case EagerZeroedThickDiskType:
backing.EagerlyScrub = types.NewBool(true)
default:
backing.ThinProvisioned = types.NewBool(false)
}
}
return disk, newSCSIController, nil
}
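// Illustrative sketch, not part of this change: folding the spec returned by
// CreateDiskSpec into a reconfigure request, which is how a disk is ultimately
// attached. The real attach path may differ; attachDiskSketch is a hypothetical name.
func attachDiskSketch(ctx context.Context, vm *VirtualMachine, diskPath string, dsObj *Datastore, volumeOptions *VolumeOptions) error {
disk, _, err := vm.CreateDiskSpec(ctx, diskPath, dsObj, volumeOptions)
if err != nil {
return err
}
spec := types.VirtualMachineConfigSpec{
DeviceChange: []types.BaseVirtualDeviceConfigSpec{
&types.VirtualDeviceConfigSpec{
Device: disk,
Operation: types.VirtualDeviceConfigSpecOperationAdd,
},
},
}
task, err := vm.Reconfigure(ctx, spec)
if err != nil {
return err
}
return task.Wait(ctx)
}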
// createAndAttachSCSIController creates and attaches the SCSI controller to the VM.
func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, diskControllerType string) (types.BaseVirtualDevice, error) {
// Get VM device list
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
allSCSIControllers := getSCSIControllers(vmDevices)
if len(allSCSIControllers) >= SCSIControllerLimit {
// we reached the maximum number of controllers we can attach
glog.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
}
newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType)
if err != nil {
glog.Errorf("Failed to create new SCSI controller on VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
hotAndRemove := true
configNewSCSIController.HotAddRemove = &hotAndRemove
configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing)
// add the scsi controller to virtual machine
err = vm.AddDevice(ctx, newSCSIController)
if err != nil {
glog.V(LogLevel).Infof("Cannot add SCSI controller to VM: %q. err: %+v", vm.InventoryPath, err)
// attempt clean up of scsi controller
vm.deleteController(ctx, newSCSIController, vmDevices)
return nil, err
}
return newSCSIController, nil
}
// getVirtualDeviceByPath gets the virtual device by path
func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath string) (types.BaseVirtualDevice, error) {
var diskUUID string
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
volumeUUID, err := vm.GetVirtualDiskPage83Data(ctx, diskPath)
if err != nil {
glog.Errorf("Failed to get disk UUID for path: %q on VM: %q. err: %+v", diskPath, vm.InventoryPath, err)
return nil, err
}
// filter vm devices to retrieve device for the given vmdk file identified by disk path
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
diskUUID = formatVirtualDiskUUID(backing.Uuid)
if diskUUID == volumeUUID {
return device, nil
}
}
}
}
return nil, nil
}
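// Illustrative sketch, not part of this change: formatVirtualDiskUUID (defined elsewhere
// in this package) canonicalizes the backing UUID before the comparison above. A typical
// normalization, assumed here for illustration, strips spaces and dashes and lower-cases hex:
//
//   "6000C298-a1b2-c3d4-e5f6-001122334455" -> "6000c298a1b2c3d4e5f6001122334455"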
// deleteController removes the most recently added SCSI controller from the VM.
func (vm *VirtualMachine) deleteController(ctx context.Context, controllerDevice types.BaseVirtualDevice, vmDevices object.VirtualDeviceList) error {
controllerDeviceList := vmDevices.SelectByType(controllerDevice)
if len(controllerDeviceList) < 1 {
return ErrNoDevicesFound
}
device := controllerDeviceList[len(controllerDeviceList)-1]
err := vm.RemoveDevice(ctx, true, device)
if err != nil {
glog.Errorf("Error occurred while removing device on VM: %q. err: %+v", vm.InventoryPath, err)
return err
}
return nil
}

View File

@@ -0,0 +1,27 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"github.com/vmware/govmomi/object"
)
// VMOptions provides helper objects for provisioning a volume with an SPBM policy
type VMOptions struct {
VMFolder *Folder
VMResourcePool *object.ResourcePool
}
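// Illustrative sketch, not part of this change: VMOptions is typically populated from an
// existing node VM, as setVMOptions in vsphere_util.go does later in this PR, e.g.:
//
//   resourcePool, _ := vm.GetResourcePool(ctx)
//   folder, _ := dc.GetFolderByPath(ctx, workingDir)
//   vmOptions := &VMOptions{VMFolder: folder, VMResourcePool: resourcePool}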

View File

@@ -0,0 +1,107 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"strings"
"github.com/golang/glog"
)
// VolumeOptions specifies various options for a volume.
type VolumeOptions struct {
CapacityKB int
Tags map[string]string
Name string
DiskFormat string
Datastore string
VSANStorageProfileData string
StoragePolicyName string
StoragePolicyID string
SCSIControllerType string
}
var (
// DiskFormatValidType specifies the valid disk formats
DiskFormatValidType = map[string]string{
ThinDiskType: ThinDiskType,
strings.ToLower(EagerZeroedThickDiskType): EagerZeroedThickDiskType,
strings.ToLower(ZeroedThickDiskType): PreallocatedDiskType,
}
// SCSIControllerValidType specifies the supported SCSI controllers
SCSIControllerValidType = []string{LSILogicControllerType, LSILogicSASControllerType, PVSCSIControllerType}
)
// DiskformatValidOptions generates the valid options for disk format
func DiskformatValidOptions() string {
validopts := ""
for diskformat := range DiskFormatValidType {
validopts += diskformat + ", "
}
validopts = strings.TrimSuffix(validopts, ", ")
return validopts
}
// CheckDiskFormatSupported checks if the diskFormat is valid
func CheckDiskFormatSupported(diskFormat string) bool {
if DiskFormatValidType[diskFormat] == "" {
glog.Errorf("Not a valid Disk Format. Valid options are %+q", DiskformatValidOptions())
return false
}
return true
}
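// Illustrative sketch, not part of this change: the map keys above are lower-cased, so
// callers are expected to normalize user input first, e.g.:
//
//   CheckDiskFormatSupported(strings.ToLower("EagerZeroedThick")) // true
//   CheckDiskFormatSupported("unknownformat")                     // false, logs the valid options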
// SCSIControllerTypeValidOptions generates valid options for SCSIControllerType
func SCSIControllerTypeValidOptions() string {
validopts := ""
for _, controllerType := range SCSIControllerValidType {
validopts += (controllerType + ", ")
}
validopts = strings.TrimSuffix(validopts, ", ")
return validopts
}
// CheckControllerSupported checks if the given controller type is valid
func CheckControllerSupported(ctrlType string) bool {
for _, c := range SCSIControllerValidType {
if ctrlType == c {
return true
}
}
glog.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions())
return false
}
// VerifyVolumeOptions checks if volumeOptions.SCSIControllerType is a valid controller type
func (volumeOptions VolumeOptions) VerifyVolumeOptions() bool {
// Validate only if SCSIControllerType is set by user.
// Default value is set later in virtualDiskManager.Create and vmDiskManager.Create
if volumeOptions.SCSIControllerType != "" {
isValid := CheckControllerSupported(volumeOptions.SCSIControllerType)
if !isValid {
return false
}
}
// ThinDiskType is the default, so skip the validation.
if volumeOptions.DiskFormat != ThinDiskType {
isValid := CheckDiskFormatSupported(volumeOptions.DiskFormat)
if !isValid {
return false
}
}
return true
}
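// Illustrative sketch, not part of this change: building a VolumeOptions and validating it
// before handing it to a disk manager. Values are placeholders; exampleVolumeOptions is a
// hypothetical name and assumes "fmt" is imported.
func exampleVolumeOptions() (*VolumeOptions, error) {
opts := &VolumeOptions{
CapacityKB: 2 * 1024 * 1024, // 2 GiB
Name: "example-vmdk",
DiskFormat: ThinDiskType,
SCSIControllerType: PVSCSIControllerType,
}
if !opts.VerifyVolumeOptions() {
return nil, fmt.Errorf("invalid volume options; valid disk formats: %s, valid controllers: %s", DiskformatValidOptions(), SCSIControllerTypeValidOptions())
}
return opts, nil
}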

View File

@@ -14,33 +14,36 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
package vclib
import (
"github.com/prometheus/client_golang/prometheus"
"time"
"github.com/prometheus/client_golang/prometheus"
)
// Cloud Provider API constants
const (
api_createvolume = "CreateVolume"
api_deletevolume = "DeleteVolume"
api_attachvolume = "AttachVolume"
api_detachvolume = "DetachVolume"
APICreateVolume = "CreateVolume"
APIDeleteVolume = "DeleteVolume"
APIAttachVolume = "AttachVolume"
APIDetachVolume = "DetachVolume"
)
// Cloud Provider Operation constants
const (
operation_deletevolume = "DeleteVolumeOperation"
operation_attachvolume = "AttachVolumeOperation"
operation_detachvolume = "DetachVolumeOperation"
operation_diskIsAttached = "DiskIsAttachedOperation"
operation_disksAreAttached = "DisksAreAttachedOperation"
operation_createvolume = "CreateVolumeOperation"
operation_createvolume_with_policy = "CreateVolumeWithPolicyOperation"
operation_createvolume_with_raw_vsan_policy = "CreateVolumeWithRawVSANPolicyOperation"
OperationDeleteVolume = "DeleteVolumeOperation"
OperationAttachVolume = "AttachVolumeOperation"
OperationDetachVolume = "DetachVolumeOperation"
OperationDiskIsAttached = "DiskIsAttachedOperation"
OperationDisksAreAttached = "DisksAreAttachedOperation"
OperationCreateVolume = "CreateVolumeOperation"
OperationCreateVolumeWithPolicy = "CreateVolumeWithPolicyOperation"
OperationCreateVolumeWithRawVSANPolicy = "CreateVolumeWithRawVSANPolicyOperation"
)
// vsphereApiMetric is for recording latency of Single API Call.
var vsphereApiMetric = prometheus.NewHistogramVec(
// vsphereAPIMetric is for recording latency of Single API Call.
var vsphereAPIMetric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "cloudprovider_vsphere_api_request_duration_seconds",
Help: "Latency of vsphere api call",
@@ -48,7 +51,7 @@ var vsphereApiMetric = prometheus.NewHistogramVec(
[]string{"request"},
)
var vsphereApiErrorMetric = prometheus.NewCounterVec(
var vsphereAPIErrorMetric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cloudprovider_vsphere_api_request_errors",
Help: "vsphere Api errors",
@@ -73,16 +76,18 @@ var vsphereOperationErrorMetric = prometheus.NewCounterVec(
[]string{"operation"},
)
func registerMetrics() {
prometheus.MustRegister(vsphereApiMetric)
prometheus.MustRegister(vsphereApiErrorMetric)
// RegisterMetrics registers all the API and Operation metrics
func RegisterMetrics() {
prometheus.MustRegister(vsphereAPIMetric)
prometheus.MustRegister(vsphereAPIErrorMetric)
prometheus.MustRegister(vsphereOperationMetric)
prometheus.MustRegister(vsphereOperationErrorMetric)
}
func recordvSphereMetric(actionName string, requestTime time.Time, err error) {
// RecordvSphereMetric records the vSphere API and Operation metrics
func RecordvSphereMetric(actionName string, requestTime time.Time, err error) {
switch actionName {
case api_createvolume, api_deletevolume, api_attachvolume, api_detachvolume:
case APICreateVolume, APIDeleteVolume, APIAttachVolume, APIDetachVolume:
recordvSphereAPIMetric(actionName, requestTime, err)
default:
recordvSphereOperationMetric(actionName, requestTime, err)
@@ -91,9 +96,9 @@ func recordvSphereMetric(actionName string, requestTime time.Time, err error) {
func recordvSphereAPIMetric(actionName string, requestTime time.Time, err error) {
if err != nil {
vsphereApiErrorMetric.With(prometheus.Labels{"request": actionName}).Inc()
vsphereAPIErrorMetric.With(prometheus.Labels{"request": actionName}).Inc()
} else {
vsphereApiMetric.With(prometheus.Labels{"request": actionName}).Observe(calculateTimeTaken(requestTime))
vsphereAPIMetric.With(prometheus.Labels{"request": actionName}).Observe(calculateTimeTaken(requestTime))
}
}
@@ -105,16 +110,17 @@ func recordvSphereOperationMetric(actionName string, requestTime time.Time, err
}
}
func recordCreateVolumeMetric(volumeOptions *VolumeOptions, requestTime time.Time, err error) {
// RecordCreateVolumeMetric records the Create Volume metric
func RecordCreateVolumeMetric(volumeOptions *VolumeOptions, requestTime time.Time, err error) {
var actionName string
if volumeOptions.StoragePolicyName != "" {
actionName = operation_createvolume_with_policy
actionName = OperationCreateVolumeWithPolicy
} else if volumeOptions.VSANStorageProfileData != "" {
actionName = operation_createvolume_with_raw_vsan_policy
actionName = OperationCreateVolumeWithRawVSANPolicy
} else {
actionName = operation_createvolume
actionName = OperationCreateVolume
}
recordvSphereMetric(actionName, requestTime, err)
RecordvSphereMetric(actionName, requestTime, err)
}
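// Illustrative sketch, not part of this change: the caller-side timing pattern these
// helpers expect, as used by the detach path earlier in this PR. doDetach is a
// hypothetical stand-in for the underlying govmomi call.
func timedDetachSketch(doDetach func() error) {
requestTime := time.Now()
err := doDetach()
RecordvSphereMetric(APIDetachVolume, requestTime, err)
}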
func calculateTimeTaken(requestBeginTime time.Time) (timeTaken float64) {

File diff suppressed because it is too large

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
func configFromEnv() (cfg VSphereConfig, ok bool) {
@@ -125,11 +126,11 @@ func TestVSphereLogin(t *testing.T) {
defer cancel()
// Create vSphere client
err = vSphereLogin(ctx, vs)
err = vs.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to create vSpere client: %s", err)
t.Errorf("Failed to connect to vSphere: %s", err)
}
defer vs.client.Logout(ctx)
defer vs.conn.GoVmomiClient.Logout(ctx)
}
func TestZones(t *testing.T) {
@@ -168,14 +169,14 @@ func TestInstances(t *testing.T) {
t.Fatalf("CurrentNodeName() failed: %s", err)
}
externalId, err := i.ExternalID(nodeName)
externalID, err := i.ExternalID(nodeName)
if err != nil {
t.Fatalf("Instances.ExternalID(%s) failed: %s", nodeName, err)
}
t.Logf("Found ExternalID(%s) = %s\n", nodeName, externalId)
t.Logf("Found ExternalID(%s) = %s\n", nodeName, externalID)
nonExistingVM := types.NodeName(rand.String(15))
externalId, err = i.ExternalID(nonExistingVM)
externalID, err = i.ExternalID(nonExistingVM)
if err == cloudprovider.InstanceNotFound {
t.Logf("VM %s was not found as expected\n", nonExistingVM)
} else if err == nil {
@@ -184,13 +185,13 @@ func TestInstances(t *testing.T) {
t.Fatalf("Instances.ExternalID did not fail as expected, err: %v", err)
}
instanceId, err := i.InstanceID(nodeName)
instanceID, err := i.InstanceID(nodeName)
if err != nil {
t.Fatalf("Instances.InstanceID(%s) failed: %s", nodeName, err)
}
t.Logf("Found InstanceID(%s) = %s\n", nodeName, instanceId)
t.Logf("Found InstanceID(%s) = %s\n", nodeName, instanceID)
instanceId, err = i.InstanceID(nonExistingVM)
instanceID, err = i.InstanceID(nonExistingVM)
if err == cloudprovider.InstanceNotFound {
t.Logf("VM %s was not found as expected\n", nonExistingVM)
} else if err == nil {
@@ -222,7 +223,7 @@ func TestVolumes(t *testing.T) {
t.Fatalf("CurrentNodeName() failed: %s", err)
}
volumeOptions := &VolumeOptions{
volumeOptions := &vclib.VolumeOptions{
CapacityKB: 1 * 1024 * 1024,
Tags: nil,
Name: "kubernetes-test-volume-" + rand.String(10),
@@ -233,7 +234,7 @@ func TestVolumes(t *testing.T) {
t.Fatalf("Cannot create a new VMDK volume: %v", err)
}
_, _, err = vs.AttachDisk(volPath, "", "")
_, err = vs.AttachDisk(volPath, "", "")
if err != nil {
t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, nodeName, err)
}
@@ -249,36 +250,3 @@ func TestVolumes(t *testing.T) {
// t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err)
// }
}
func TestGetVMName(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
}
// Create vSphere configuration object
vs, err := newVSphere(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create vSphere client
err = vSphereLogin(ctx, vs)
if err != nil {
t.Errorf("Failed to create vSpere client: %s", err)
}
defer vs.client.Logout(ctx)
// Get VM name
vmName, err := getVMName(vs.client, &cfg)
if err != nil {
t.Fatalf("Failed to get VM name: %s", err)
}
if vmName != "vmname" {
t.Errorf("Expect VM name 'vmname', got: %s", vmName)
}
}

View File

@@ -18,21 +18,23 @@ package vsphere
import (
"context"
"errors"
"io/ioutil"
"os"
"runtime"
"strings"
"time"
"github.com/golang/glog"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/mo"
"fmt"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/pbm"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
pbmtypes "github.com/vmware/govmomi/pbm/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
)
const (
@@ -42,15 +44,17 @@ const (
VirtualMachine = "VirtualMachine"
)
// Reads vSphere configuration from system environment and construct vSphere object
// GetVSphere reads vSphere configuration from system environment and construct vSphere object
func GetVSphere() (*VSphere, error) {
cfg := getVSphereConfig()
client, err := GetgovmomiClient(cfg)
vSphereConn := getVSphereConn(cfg)
client, err := GetgovmomiClient(vSphereConn)
if err != nil {
return nil, err
}
vSphereConn.GoVmomiClient = client
vs := &VSphere{
client: client,
conn: vSphereConn,
cfg: cfg,
localInstanceID: "",
}
@@ -75,233 +79,217 @@ func getVSphereConfig() *VSphereConfig {
return &cfg
}
func GetgovmomiClient(cfg *VSphereConfig) (*govmomi.Client, error) {
if cfg == nil {
cfg = getVSphereConfig()
func getVSphereConn(cfg *VSphereConfig) *vclib.VSphereConnection {
vSphereConn := &vclib.VSphereConnection{
Username: cfg.Global.User,
Password: cfg.Global.Password,
Hostname: cfg.Global.VCenterIP,
Insecure: cfg.Global.InsecureFlag,
RoundTripperCount: cfg.Global.RoundTripperCount,
Port: cfg.Global.VCenterPort,
}
client, err := newClient(context.TODO(), cfg)
return vSphereConn
}
// GetgovmomiClient gets the goVMOMI client for the vsphere connection object
func GetgovmomiClient(conn *vclib.VSphereConnection) (*govmomi.Client, error) {
if conn == nil {
cfg := getVSphereConfig()
conn = getVSphereConn(cfg)
}
client, err := conn.NewClient(context.TODO())
return client, err
}
// Get placement compatibility result based on storage policy requirements.
func (vs *VSphere) GetPlacementCompatibilityResult(ctx context.Context, pbmClient *pbm.Client, storagePolicyID string) (pbm.PlacementCompatibilityResult, error) {
datastores, err := vs.getSharedDatastoresInK8SCluster(ctx)
// getvmUUID gets the BIOS UUID via the sys interface. This UUID is known to vSphere
func getvmUUID() (string, error) {
id, err := ioutil.ReadFile(UUIDPath)
if err != nil {
return nil, err
return "", fmt.Errorf("error retrieving vm uuid: %s", err)
}
var hubs []pbmtypes.PbmPlacementHub
for _, ds := range datastores {
hubs = append(hubs, pbmtypes.PbmPlacementHub{
HubType: ds.Type,
HubId: ds.Value,
})
uuidFromFile := string(id[:])
// strip leading and trailing white space and the newline char
uuid := strings.TrimSpace(uuidFromFile)
// check the uuid starts with "VMware-"
if !strings.HasPrefix(uuid, UUIDPrefix) {
return "", fmt.Errorf("Failed to match Prefix, UUID read from the file is %v", uuidFromFile)
}
req := []pbmtypes.BasePbmPlacementRequirement{
&pbmtypes.PbmPlacementCapabilityProfileRequirement{
ProfileId: pbmtypes.PbmProfileId{
UniqueId: storagePolicyID,
},
},
// Strip the prefix and white spaces and -
uuid = strings.Replace(uuid[len(UUIDPrefix):(len(uuid))], " ", "", -1)
uuid = strings.Replace(uuid, "-", "", -1)
if len(uuid) != 32 {
return "", fmt.Errorf("Length check failed, UUID read from the file is %v", uuidFromFile)
}
res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req)
if err != nil {
return nil, err
}
return res, nil
}
// Verify if the user specified datastore is in the list of non-compatible datastores.
// If yes, return the non compatible datastore reference.
func (vs *VSphere) IsUserSpecifiedDatastoreNonCompatible(ctx context.Context, compatibilityResult pbm.PlacementCompatibilityResult, dsName string) (bool, *types.ManagedObjectReference) {
dsMoList := vs.GetNonCompatibleDatastoresMo(ctx, compatibilityResult)
for _, ds := range dsMoList {
if ds.Info.GetDatastoreInfo().Name == dsName {
dsMoRef := ds.Reference()
return true, &dsMoRef
}
}
return false, nil
}
func GetNonCompatibleDatastoreFaultMsg(compatibilityResult pbm.PlacementCompatibilityResult, dsMoref types.ManagedObjectReference) string {
var faultMsg string
for _, res := range compatibilityResult {
if res.Hub.HubId == dsMoref.Value {
for _, err := range res.Error {
faultMsg = faultMsg + err.LocalizedMessage
}
}
}
return faultMsg
}
// Get the best fit compatible datastore by free space.
func GetMostFreeDatastore(dsMo []mo.Datastore) mo.Datastore {
var curMax int64
curMax = -1
var index int
for i, ds := range dsMo {
dsFreeSpace := ds.Info.GetDatastoreInfo().FreeSpace
if dsFreeSpace > curMax {
curMax = dsFreeSpace
index = i
}
}
return dsMo[index]
}
func (vs *VSphere) GetCompatibleDatastoresMo(ctx context.Context, compatibilityResult pbm.PlacementCompatibilityResult) ([]mo.Datastore, error) {
compatibleHubs := compatibilityResult.CompatibleDatastores()
// Return an error if there are no compatible datastores.
if len(compatibleHubs) < 1 {
return nil, fmt.Errorf("There are no compatible datastores that satisfy the storage policy requirements")
}
dsMoList, err := vs.getDatastoreMo(ctx, compatibleHubs)
if err != nil {
return nil, err
}
return dsMoList, nil
}
func (vs *VSphere) GetNonCompatibleDatastoresMo(ctx context.Context, compatibilityResult pbm.PlacementCompatibilityResult) []mo.Datastore {
nonCompatibleHubs := compatibilityResult.NonCompatibleDatastores()
// Return an error if there are no compatible datastores.
if len(nonCompatibleHubs) < 1 {
return nil
}
dsMoList, err := vs.getDatastoreMo(ctx, nonCompatibleHubs)
if err != nil {
return nil
}
return dsMoList
}
// Get the datastore managed objects for the place hubs using property collector.
func (vs *VSphere) getDatastoreMo(ctx context.Context, hubs []pbmtypes.PbmPlacementHub) ([]mo.Datastore, error) {
var dsMoRefs []types.ManagedObjectReference
for _, hub := range hubs {
dsMoRefs = append(dsMoRefs, types.ManagedObjectReference{
Type: hub.HubType,
Value: hub.HubId,
})
}
pc := property.DefaultCollector(vs.client.Client)
var dsMoList []mo.Datastore
err := pc.Retrieve(ctx, dsMoRefs, []string{DatastoreInfoProperty}, &dsMoList)
if err != nil {
return nil, err
}
return dsMoList, nil
// need to add dashes, e.g. "564d395e-d807-e18a-cb25-b79f65eb2b9f"
uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32])
return uuid, nil
}
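// Illustrative sketch, not part of this change: the transformation above applied to a
// sample /sys value (input is hypothetical; the output format matches the dashed example
// in the comment above):
//
//   in:  "VMware-56 4d 39 5e d8 07 e1 8a-cb 25 b7 9f 65 eb 2b 9f\n"
//   out: "564d395e-d807-e18a-cb25-b79f65eb2b9f"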
// Get all datastores accessible for the virtual machine object.
func (vs *VSphere) getSharedDatastoresInK8SCluster(ctx context.Context) ([]types.ManagedObjectReference, error) {
f := find.NewFinder(vs.client.Client, true)
dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
f.SetDatacenter(dc)
vmFolder, err := f.Folder(ctx, strings.TrimSuffix(vs.cfg.Global.WorkingDir, "/"))
func getSharedDatastoresInK8SCluster(ctx context.Context, folder *vclib.Folder) ([]*vclib.Datastore, error) {
vmList, err := folder.GetVirtualMachines(ctx)
if err != nil {
glog.Errorf("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err)
return nil, err
}
vmMoList, err := vs.GetVMsInsideFolder(ctx, vmFolder, []string{NameProperty})
if err != nil {
return nil, err
if len(vmList) == 0 {
glog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
return nil, fmt.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
}
index := 0
var sharedDs []string
for _, vmMo := range vmMoList {
if !strings.HasPrefix(vmMo.Name, DummyVMPrefixName) {
accessibleDatastores, err := vs.getAllAccessibleDatastores(ctx, vmMo)
var sharedDatastores []*vclib.Datastore
for _, vm := range vmList {
vmName, err := vm.ObjectName(ctx)
if err != nil {
return nil, err
}
if !strings.HasPrefix(vmName, DummyVMPrefixName) {
accessibleDatastores, err := vm.GetAllAccessibleDatastores(ctx)
if err != nil {
return nil, err
}
if index == 0 {
sharedDs = accessibleDatastores
sharedDatastores = accessibleDatastores
} else {
sharedDs = intersect(sharedDs, accessibleDatastores)
if len(sharedDs) == 0 {
return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster")
sharedDatastores = intersect(sharedDatastores, accessibleDatastores)
if len(sharedDatastores) == 0 {
return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster: %s", folder.InventoryPath)
}
}
index++
}
}
var sharedDSMorefs []types.ManagedObjectReference
for _, ds := range sharedDs {
sharedDSMorefs = append(sharedDSMorefs, types.ManagedObjectReference{
Value: ds,
Type: "Datastore",
})
}
return sharedDSMorefs, nil
return sharedDatastores, nil
}
func intersect(list1 []string, list2 []string) []string {
var sharedList []string
func intersect(list1 []*vclib.Datastore, list2 []*vclib.Datastore) []*vclib.Datastore {
var sharedDs []*vclib.Datastore
for _, val1 := range list1 {
// Check if val1 is found in list2
for _, val2 := range list2 {
if val1 == val2 {
sharedList = append(sharedList, val1)
if val1.Reference().Value == val2.Reference().Value {
sharedDs = append(sharedDs, val1)
break
}
}
}
return sharedList
}
// Get the VM list inside a folder.
func (vs *VSphere) GetVMsInsideFolder(ctx context.Context, vmFolder *object.Folder, properties []string) ([]mo.VirtualMachine, error) {
vmFolders, err := vmFolder.Children(ctx)
if err != nil {
return nil, err
}
pc := property.DefaultCollector(vs.client.Client)
var vmRefs []types.ManagedObjectReference
var vmMoList []mo.VirtualMachine
for _, vmFolder := range vmFolders {
if vmFolder.Reference().Type == VirtualMachine {
vmRefs = append(vmRefs, vmFolder.Reference())
}
}
err = pc.Retrieve(ctx, vmRefs, properties, &vmMoList)
if err != nil {
return nil, err
}
return vmMoList, nil
return sharedDs
}
// Get the datastores accessible for the virtual machine object.
func (vs *VSphere) getAllAccessibleDatastores(ctx context.Context, vmMo mo.VirtualMachine) ([]string, error) {
f := find.NewFinder(vs.client.Client, true)
dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
if err != nil {
return nil, err
func getAllAccessibleDatastores(ctx context.Context, client *vim25.Client, vmMo mo.VirtualMachine) ([]string, error) {
host := vmMo.Summary.Runtime.Host
if host == nil {
return nil, errors.New("VM doesn't have a HostSystem")
}
f.SetDatacenter(dc)
vmRegex := vs.cfg.Global.WorkingDir + vmMo.Name
vmObj, err := f.VirtualMachine(ctx, vmRegex)
if err != nil {
return nil, err
}
host, err := vmObj.HostSystem(ctx)
if err != nil {
return nil, err
}
var hostSystemMo mo.HostSystem
s := object.NewSearchIndex(vs.client.Client)
err = s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo)
s := object.NewSearchIndex(client)
err := s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo)
if err != nil {
return nil, err
}
var dsRefValues []string
for _, dsRef := range hostSystemMo.Datastore {
dsRefValues = append(dsRefValues, dsRef.Value)
}
return dsRefValues, nil
}
// getMostFreeDatastore gets the best fit compatible datastore by free space.
func getMostFreeDatastoreName(ctx context.Context, client *vim25.Client, dsObjList []*vclib.Datastore) (string, error) {
dsMoList, err := dsObjList[0].Datacenter.GetDatastoreMoList(ctx, dsObjList, []string{DatastoreInfoProperty})
if err != nil {
return "", err
}
curMax := int64(-1)
var index int
for i, dsMo := range dsMoList {
dsFreeSpace := dsMo.Info.GetDatastoreInfo().FreeSpace
if dsFreeSpace > curMax {
curMax = dsFreeSpace
index = i
}
}
return dsMoList[index].Info.GetDatastoreInfo().Name, nil
}
func getPbmCompatibleDatastore(ctx context.Context, client *vim25.Client, storagePolicyName string, folder *vclib.Folder) (string, error) {
pbmClient, err := vclib.NewPbmClient(ctx, client)
if err != nil {
return "", err
}
storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName)
if err != nil {
glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err)
return "", err
}
sharedDsList, err := getSharedDatastoresInK8SCluster(ctx, folder)
if err != nil {
glog.Errorf("Failed to get shared datastores from kubernetes cluster: %s. err: %+v", folder.InventoryPath, err)
return "", err
}
compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, storagePolicyID, sharedDsList)
if err != nil {
glog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", sharedDsList, storagePolicyID, err)
return "", err
}
datastore, err := getMostFreeDatastoreName(ctx, client, compatibleDatastores)
if err != nil {
glog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err)
return "", err
}
return datastore, nil
}
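// Illustrative sketch, not part of this change: how the policy-based selection above can
// slot into volume creation. pickDatastoreSketch is a hypothetical name; the real default
// datastore comes from the vSphere cloud config.
func pickDatastoreSketch(ctx context.Context, client *vim25.Client, folder *vclib.Folder, volumeOptions *vclib.VolumeOptions) (string, error) {
if volumeOptions.StoragePolicyName != "" {
return getPbmCompatibleDatastore(ctx, client, volumeOptions.StoragePolicyName, folder)
}
if volumeOptions.Datastore != "" {
return volumeOptions.Datastore, nil
}
return "", fmt.Errorf("no datastore or storage policy specified")
}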
func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter) (*vclib.VMOptions, error) {
var vmOptions vclib.VMOptions
vm, err := dc.GetVMByPath(ctx, vs.cfg.Global.WorkingDir+"/"+vs.localInstanceID)
if err != nil {
return nil, err
}
resourcePool, err := vm.GetResourcePool(ctx)
if err != nil {
return nil, err
}
folder, err := dc.GetFolderByPath(ctx, vs.cfg.Global.WorkingDir)
if err != nil {
return nil, err
}
vmOptions.VMFolder = folder
vmOptions.VMResourcePool = resourcePool
return &vmOptions, nil
}
// cleanUpDummyVMs is a background routine responsible for deleting stale dummy VMs.
func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for {
time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute)
// Ensure client is logged in and session is valid
err := vs.conn.Connect(ctx)
if err != nil {
glog.V(4).Infof("Failed to connect to VC with err: %+v. Retrying again...", err)
continue
}
dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter)
if err != nil {
glog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Global.Datacenter, err)
continue
}
// Get the folder reference for global working directory where the dummy VM needs to be created.
vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Global.WorkingDir)
if err != nil {
glog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Global.WorkingDir, err)
continue
}
// A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests.
cleanUpDummyVMLock.Lock()
err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc)
if err != nil {
glog.V(4).Infof("Unable to clean up dummy VMs in the kubernetes cluster: %q. err: %+v", vs.cfg.Global.WorkingDir, err)
}
cleanUpDummyVMLock.Unlock()
}
}

View File

@@ -19,6 +19,7 @@ go_library(
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/vsphere:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
@@ -43,7 +44,7 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/cloudprovider/providers/vsphere:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",

View File

@@ -75,7 +75,7 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No
// vsphereCloud.AttachDisk checks if disk is already attached to host and
// succeeds in that case, so no need to do that separately.
_, diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyID, nodeName)
diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyID, nodeName)
if err != nil {
glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err)
return "", err

View File

@@ -21,7 +21,7 @@ import (
"testing"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -233,29 +233,29 @@ type diskIsAttachedCall struct {
ret error
}
func (testcase *testcase) AttachDisk(diskName string, storagePolicyName string, nodeName types.NodeName) (string, string, error) {
func (testcase *testcase) AttachDisk(diskName string, storagePolicyName string, nodeName types.NodeName) (string, error) {
expected := &testcase.attach
if expected.diskName == "" && expected.nodeName == "" {
// testcase.attach looks uninitialized, test did not expect to call
// AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!")
return "", "", errors.New("Unexpected AttachDisk call!")
return "", errors.New("Unexpected AttachDisk call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return "", "", errors.New("Unexpected AttachDisk call: wrong diskName")
return "", errors.New("Unexpected AttachDisk call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return "", "", errors.New("Unexpected AttachDisk call: wrong nodeName")
return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
}
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceUUID, expected.ret)
return "", expected.retDeviceUUID, expected.ret
return expected.retDeviceUUID, expected.ret
}
func (testcase *testcase) DetachDisk(diskName string, nodeName types.NodeName) error {
@@ -312,7 +312,7 @@ func (testcase *testcase) DisksAreAttached(diskNames []string, nodeName types.No
return nil, errors.New("Not implemented")
}
func (testcase *testcase) CreateVolume(volumeOptions *vsphere.VolumeOptions) (volumePath string, err error) {
func (testcase *testcase) CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error) {
return "", errors.New("Not implemented")
}

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@@ -94,7 +95,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec
// vSphere works with kilobytes, convert to KiB with rounding up
volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024))
name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
volumeOptions := &vsphere.VolumeOptions{
volumeOptions := &vclib.VolumeOptions{
CapacityKB: volSizeKB,
Tags: *v.options.CloudTags,
Name: name,
@@ -129,7 +130,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec
if volumeOptions.VSANStorageProfileData != "" {
if volumeOptions.StoragePolicyName != "" {
return nil, fmt.Errorf("Cannot specify storage policy capabilities along with storage policy name. Please specify only one.")
return nil, fmt.Errorf("Cannot specify storage policy capabilities along with storage policy name. Please specify only one")
}
volumeOptions.VSANStorageProfileData = "(" + volumeOptions.VSANStorageProfileData + ")"
}
@@ -141,7 +142,6 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec
vmDiskPath, err := cloud.CreateVolume(volumeOptions)
if err != nil {
glog.V(2).Infof("Error creating vsphere volume: %v", err)
return nil, err
}
volSpec = &VolumeSpec{

View File

@@ -37,6 +37,7 @@ go_library(
"//pkg/api/v1/helper:go_default_library",
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/cloudprovider/providers/vsphere:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//test/e2e/framework:go_default_library",

View File

@@ -102,7 +102,7 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
By("Creating the Client Pod")
clientPod, err = framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
node := types.NodeName(clientPod.Spec.NodeName)
node = types.NodeName(clientPod.Spec.NodeName)
By("Verify disk should be attached to the node")
isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, node)

View File

@@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -160,13 +161,13 @@ func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]str
}
// createVSphereVolume creates a vmdk volume
func createVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vsphere.VolumeOptions) (string, error) {
func createVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vclib.VolumeOptions) (string, error) {
var (
volumePath string
err error
)
if volumeOptions == nil {
volumeOptions = new(vsphere.VolumeOptions)
volumeOptions = new(vclib.VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}

View File

@@ -22,8 +22,6 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
@@ -48,10 +46,8 @@ import (
var _ = SIGDescribe("vsphere Volume fstype", func() {
f := framework.NewDefaultFramework("volume-fstype")
var (
client clientset.Interface
namespace string
storageclass *storage.StorageClass
pvclaim *v1.PersistentVolumeClaim
client clientset.Interface
namespace string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
@@ -60,87 +56,56 @@ var _ = SIGDescribe("vsphere Volume fstype", func() {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).NotTo(BeZero(), "Unable to find ready and schedulable Node")
})
AfterEach(func() {
var scDeleteError error
var pvDeleteError error
if storageclass != nil {
scDeleteError = client.StorageV1beta1().StorageClasses().Delete(storageclass.Name, nil)
}
if pvclaim != nil {
pvDeleteError = client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaim.Name, nil)
}
framework.ExpectNoError(scDeleteError)
framework.ExpectNoError(pvDeleteError)
storageclass = nil
pvclaim = nil
})
It("verify fstype - ext3 formatted volume", func() {
By("Invoking Test for fstype: ext3")
storageclass, pvclaim = invokeTestForFstype(f, client, namespace, "ext3", "ext3")
invokeTestForFstype(f, client, namespace, "ext3", "ext3")
})
It("verify disk format type - default value should be ext4", func() {
By("Invoking Test for fstype: Default Value")
storageclass, pvclaim = invokeTestForFstype(f, client, namespace, "", "ext4")
invokeTestForFstype(f, client, namespace, "", "ext4")
})
})
func invokeTestForFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) (*storage.StorageClass, *v1.PersistentVolumeClaim) {
func invokeTestForFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) {
framework.Logf("Invoking Test for fstype: %s", fstype)
scParameters := make(map[string]string)
scParameters["fstype"] = fstype
By("Creating Storage Class With Fstype")
storageClassSpec := getVSphereStorageClassSpec("fstype", scParameters)
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters))
Expect(err).NotTo(HaveOccurred())
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
// Get new copy of the claim
pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims)
Expect(err).NotTo(HaveOccurred())
By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nil, "/bin/df -T /mnt/test | /bin/awk 'FNR == 2 {print $2}' > /mnt/test/fstype && while true ; do sleep 2 ; done")
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred())
By("Waiting for pod to be running")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
pod, err := framework.CreatePod(client, namespace, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
// Asserts: Right disk is attached to the pod
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
isAttached, err := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/test/fstype"}, expectedContent, time.Minute)
Expect(err).NotTo(HaveOccurred())
var volumePaths []string
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Delete pod and wait for volume to be detached from node")
deletePodAndWaitForVolumeToDetach(f, client, pod, vsp, pod.Spec.NodeName, volumePaths)
return storageclass, pvclaim
By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
}

View File

@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -216,8 +217,8 @@ var _ = SIGDescribe("Volume Placement", func() {
*/
It("should create and delete pod with multiple volumes from different datastore", func() {
By("creating another vmdk on non default shared datastore")
var volumeOptions *vsphere.VolumeOptions
volumeOptions = new(vsphere.VolumeOptions)
var volumeOptions *vclib.VolumeOptions
volumeOptions = new(vclib.VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
volumeOptions.Datastore = os.Getenv("VSPHERE_SECOND_SHARED_DATASTORE")

View File

@@ -218,7 +218,7 @@ var _ = SIGDescribe("vSphere Storage policy support for dynamic provisioning", f
framework.Logf("Invoking Test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "User specified datastore: \\\"" + VsanDatastore + "\\\" is not compatible with the storagePolicy: \\\"" + os.Getenv("VSPHERE_SPBM_TAG_POLICY") + "\\\""
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + os.Getenv("VSPHERE_SPBM_TAG_POLICY") + "\\\""
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
@@ -248,7 +248,7 @@ var _ = SIGDescribe("vSphere Storage policy support for dynamic provisioning", f
framework.Logf("Invoking Test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one."
errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}