staging the vsphere cloud provider under k8s.io/legacy-cloud-providers/vsphere

Signed-off-by: Andrew Sy Kim <kiman@vmware.com>
Andrew Sy Kim
2019-03-28 16:58:15 -04:00
parent bf22bbdead
commit a58942625d
68 changed files with 455 additions and 43 deletions
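The practical effect for importers: the provider package now lives under the staging repo k8s.io/legacy-cloud-providers, so blank imports that previously pointed at k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere switch to the new path, as the hunks below do. A minimal sketch of the change from a consumer's point of view (illustrative only, not part of this commit):

package main

import (
	// before this commit:
	//   _ "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
	// after this commit, the same provider is imported from staging:
	_ "k8s.io/legacy-cloud-providers/vsphere"
)

func main() {}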

View File

@@ -1,9 +1,6 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@@ -23,7 +20,7 @@ go_library(
"//pkg/cloudprovider/providers/openstack:go_default_library",
"//pkg/cloudprovider/providers/ovirt:go_default_library",
"//pkg/cloudprovider/providers/photon:go_default_library",
"//pkg/cloudprovider/providers/vsphere:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/vsphere:go_default_library",
],
)
@@ -46,7 +43,13 @@ filegroup(
"//pkg/cloudprovider/providers/openstack:all-srcs",
"//pkg/cloudprovider/providers/ovirt:all-srcs",
"//pkg/cloudprovider/providers/photon:all-srcs",
"//pkg/cloudprovider/providers/vsphere:all-srcs",
],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["providers_test.go"],
embed = [":go_default_library"],
deps = ["//staging/src/k8s.io/legacy-cloud-providers/vsphere/testing:go_default_library"],
)

View File

@@ -25,5 +25,5 @@ import (
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/photon"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
_ "k8s.io/legacy-cloud-providers/vsphere"
)
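The blank imports above work because each in-tree provider registers a construction factory with the k8s.io/cloud-provider registry in its init function; callers later look the provider up by name. A minimal sketch of that lookup, assuming the RegisterCloudProvider/GetCloudProvider API from k8s.io/cloud-provider and a hypothetical config path (illustrative only, not part of this commit):

package main

import (
	"fmt"
	"os"

	cloudprovider "k8s.io/cloud-provider"
	// Blank import runs the package's init(), which registers the "vsphere" factory.
	_ "k8s.io/legacy-cloud-providers/vsphere"
)

func main() {
	// Hypothetical config location; in practice the path comes from --cloud-config.
	cfg, err := os.Open("/etc/kubernetes/vsphere.conf")
	if err != nil {
		fmt.Println("open config:", err)
		return
	}
	defer cfg.Close()

	cloud, err := cloudprovider.GetCloudProvider("vsphere", cfg)
	if err != nil {
		fmt.Println("init provider:", err)
		return
	}
	fmt.Printf("initialized cloud provider: %T\n", cloud)
}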

View File

@@ -1,7 +1,5 @@
// +build !windows,!linux
/*
Copyright 2018 The Kubernetes Authors.
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
package cloudprovider
import "fmt"
func getRawUUID() (string, error) {
return "", fmt.Errorf("Retrieving VM UUID on this build is not implemented.")
}
import (
// transitive test dependencies are not vendored by go modules
// so we have to explicitly import them here
_ "k8s.io/legacy-cloud-providers/vsphere/testing"
)

View File

@@ -1,96 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"credentialmanager.go",
"nodemanager.go",
"vsphere.go",
"vsphere_util.go",
"vsphere_util_linux.go",
"vsphere_util_unsupported.go",
"vsphere_util_windows.go",
],
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere",
deps = [
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/property:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/rest:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/tags:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"credentialmanager_test.go",
"vsphere_test.go",
"vsphere_util_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib/fixtures:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/vmware/govmomi:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/lookup/simulator:go_default_library",
"//vendor/github.com/vmware/govmomi/property:go_default_library",
"//vendor/github.com/vmware/govmomi/simulator:go_default_library",
"//vendor/github.com/vmware/govmomi/simulator/vpx:go_default_library",
"//vendor/github.com/vmware/govmomi/sts/simulator:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/rest:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/simulator:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/tags:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/cloudprovider/providers/vsphere/vclib:all-srcs",
],
tags = ["automanaged"],
)

View File

@@ -1,10 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- abrarshivani
- baludontu
- divyenpatel
- imkin
- frapposelli
- dougm
- SandeepPissay

View File

@@ -1,164 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"errors"
"fmt"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/listers/core/v1"
"k8s.io/klog"
"net/http"
"strings"
"sync"
)
// Error Messages
const (
CredentialsNotFoundErrMsg = "Credentials not found"
CredentialMissingErrMsg = "Username/Password is missing"
UnknownSecretKeyErrMsg = "Unknown secret key"
)
// Error constants
var (
ErrCredentialsNotFound = errors.New(CredentialsNotFoundErrMsg)
ErrCredentialMissing = errors.New(CredentialMissingErrMsg)
ErrUnknownSecretKey = errors.New(UnknownSecretKeyErrMsg)
)
type SecretCache struct {
cacheLock sync.Mutex
VirtualCenter map[string]*Credential
Secret *corev1.Secret
}
type Credential struct {
User string `gcfg:"user"`
Password string `gcfg:"password"`
}
type SecretCredentialManager struct {
SecretName string
SecretNamespace string
SecretLister v1.SecretLister
Cache *SecretCache
}
// GetCredential returns credentials for the given vCenter Server.
// GetCredential returns an error if the Secret has not been added,
// or if the secret doesn't contain any credentials.
func (secretCredentialManager *SecretCredentialManager) GetCredential(server string) (*Credential, error) {
err := secretCredentialManager.updateCredentialsMap()
if err != nil {
statusErr, ok := err.(*apierrors.StatusError)
if (ok && statusErr.ErrStatus.Code != http.StatusNotFound) || !ok {
return nil, err
}
// Handle secrets deletion by finding credentials from cache
klog.Warningf("secret %q not found in namespace %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace)
}
credential, found := secretCredentialManager.Cache.GetCredential(server)
if !found {
klog.Errorf("credentials not found for server %q", server)
return nil, ErrCredentialsNotFound
}
return &credential, nil
}
func (secretCredentialManager *SecretCredentialManager) updateCredentialsMap() error {
if secretCredentialManager.SecretLister == nil {
return fmt.Errorf("SecretLister is not initialized")
}
secret, err := secretCredentialManager.SecretLister.Secrets(secretCredentialManager.SecretNamespace).Get(secretCredentialManager.SecretName)
if err != nil {
klog.Errorf("Cannot get secret %s in namespace %s. error: %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace, err)
return err
}
cacheSecret := secretCredentialManager.Cache.GetSecret()
if cacheSecret != nil &&
cacheSecret.GetResourceVersion() == secret.GetResourceVersion() {
klog.V(4).Infof("VCP SecretCredentialManager: Secret %q will not be updated in cache. Since, secrets have same resource version %q", secretCredentialManager.SecretName, cacheSecret.GetResourceVersion())
return nil
}
secretCredentialManager.Cache.UpdateSecret(secret)
return secretCredentialManager.Cache.parseSecret()
}
func (cache *SecretCache) GetSecret() *corev1.Secret {
cache.cacheLock.Lock()
defer cache.cacheLock.Unlock()
return cache.Secret
}
func (cache *SecretCache) UpdateSecret(secret *corev1.Secret) {
cache.cacheLock.Lock()
defer cache.cacheLock.Unlock()
cache.Secret = secret
}
func (cache *SecretCache) GetCredential(server string) (Credential, bool) {
cache.cacheLock.Lock()
defer cache.cacheLock.Unlock()
credential, found := cache.VirtualCenter[server]
if !found {
return Credential{}, found
}
return *credential, found
}
func (cache *SecretCache) parseSecret() error {
cache.cacheLock.Lock()
defer cache.cacheLock.Unlock()
return parseConfig(cache.Secret.Data, cache.VirtualCenter)
}
// parseConfig populates the mapping from vCenter IP/FQDN to its credentials (username and password) from the secret data.
func parseConfig(data map[string][]byte, config map[string]*Credential) error {
if len(data) == 0 {
return ErrCredentialMissing
}
for credentialKey, credentialValue := range data {
credentialKey = strings.ToLower(credentialKey)
vcServer := ""
if strings.HasSuffix(credentialKey, "password") {
vcServer = strings.Split(credentialKey, ".password")[0]
if _, ok := config[vcServer]; !ok {
config[vcServer] = &Credential{}
}
config[vcServer].Password = string(credentialValue)
} else if strings.HasSuffix(credentialKey, "username") {
vcServer = strings.Split(credentialKey, ".username")[0]
if _, ok := config[vcServer]; !ok {
config[vcServer] = &Credential{}
}
config[vcServer].User = string(credentialValue)
} else {
klog.Errorf("Unknown secret key %s", credentialKey)
return ErrUnknownSecretKey
}
}
for vcServer, credential := range config {
if credential.User == "" || credential.Password == "" {
klog.Errorf("Username/Password is missing for server %s", vcServer)
return ErrCredentialMissing
}
}
return nil
}
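For reference, the per-vCenter key layout parseConfig expects is "<server>.username" / "<server>.password". A minimal sketch of feeding it, mirroring the unit tests in the next file; it would have to live in a _test.go file inside package vsphere since parseConfig is unexported (illustrative only, not part of this commit):

package vsphere

import "testing"

func TestParseSecretDataShape(t *testing.T) {
	// One username/password pair per vCenter server, keyed by IP or FQDN.
	data := map[string][]byte{
		"10.20.30.40.username": []byte("Admin"),
		"10.20.30.40.password": []byte("Password"),
	}
	creds := make(map[string]*Credential)
	if err := parseConfig(data, creds); err != nil {
		t.Fatalf("parse failed: %v", err)
	}
	if creds["10.20.30.40"].User != "Admin" || creds["10.20.30.40"].Password != "Password" {
		t.Fatalf("unexpected credentials: %+v", creds["10.20.30.40"])
	}
}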

View File

@@ -1,341 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"reflect"
"testing"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
)
func TestSecretCredentialManager_GetCredential(t *testing.T) {
var (
userKey = "username"
passwordKey = "password"
testUser = "user"
testPassword = "password"
testServer = "0.0.0.0"
testServer2 = "0.0.1.1"
testUserServer2 = "user1"
testPasswordServer2 = "password1"
testIncorrectServer = "1.1.1.1"
)
var (
secretName = "vsconf"
secretNamespace = "kube-system"
)
var (
addSecretOp = "ADD_SECRET_OP"
getCredentialsOp = "GET_CREDENTIAL_OP"
deleteSecretOp = "DELETE_SECRET_OP"
)
type GetCredentialsTest struct {
server string
username string
password string
err error
}
type OpSecretTest struct {
secret *corev1.Secret
}
type testEnv struct {
testName string
ops []string
expectedValues []interface{}
}
client := &fake.Clientset{}
metaObj := metav1.ObjectMeta{
Name: secretName,
Namespace: secretNamespace,
}
defaultSecret := &corev1.Secret{
ObjectMeta: metaObj,
Data: map[string][]byte{
testServer + "." + userKey: []byte(testUser),
testServer + "." + passwordKey: []byte(testPassword),
},
}
multiVCSecret := &corev1.Secret{
ObjectMeta: metaObj,
Data: map[string][]byte{
testServer + "." + userKey: []byte(testUser),
testServer + "." + passwordKey: []byte(testPassword),
testServer2 + "." + userKey: []byte(testUserServer2),
testServer2 + "." + passwordKey: []byte(testPasswordServer2),
},
}
emptySecret := &corev1.Secret{
ObjectMeta: metaObj,
Data: map[string][]byte{},
}
tests := []testEnv{
{
testName: "Deleting secret should give the credentials from cache",
ops: []string{addSecretOp, getCredentialsOp, deleteSecretOp, getCredentialsOp},
expectedValues: []interface{}{
OpSecretTest{
secret: defaultSecret,
},
GetCredentialsTest{
username: testUser,
password: testPassword,
server: testServer,
},
OpSecretTest{
secret: defaultSecret,
},
GetCredentialsTest{
username: testUser,
password: testPassword,
server: testServer,
},
},
},
{
testName: "Add secret and get credentials",
ops: []string{addSecretOp, getCredentialsOp},
expectedValues: []interface{}{
OpSecretTest{
secret: defaultSecret,
},
GetCredentialsTest{
username: testUser,
password: testPassword,
server: testServer,
},
},
},
{
testName: "Getcredentials should fail by not adding at secret at first time",
ops: []string{getCredentialsOp},
expectedValues: []interface{}{
GetCredentialsTest{
username: testUser,
password: testPassword,
server: testServer,
err: ErrCredentialsNotFound,
},
},
},
{
testName: "GetCredential should fail to get credentials from empty secrets",
ops: []string{addSecretOp, getCredentialsOp},
expectedValues: []interface{}{
OpSecretTest{
secret: emptySecret,
},
GetCredentialsTest{
server: testServer,
err: ErrCredentialMissing,
},
},
},
{
testName: "GetCredential should fail to get credentials for invalid server",
ops: []string{addSecretOp, getCredentialsOp},
expectedValues: []interface{}{
OpSecretTest{
secret: defaultSecret,
},
GetCredentialsTest{
server: testIncorrectServer,
err: ErrCredentialsNotFound,
},
},
},
{
testName: "GetCredential for multi-vc",
ops: []string{addSecretOp, getCredentialsOp},
expectedValues: []interface{}{
OpSecretTest{
secret: multiVCSecret,
},
GetCredentialsTest{
server: testServer2,
username: testUserServer2,
password: testPasswordServer2,
},
},
},
}
// TODO: replace 0 with NoResyncPeriodFunc() once it moved out pkg/controller/controller_utils.go in k/k.
informerFactory := informers.NewSharedInformerFactory(client, 0)
secretInformer := informerFactory.Core().V1().Secrets()
secretCredentialManager := &SecretCredentialManager{
SecretName: secretName,
SecretNamespace: secretNamespace,
SecretLister: secretInformer.Lister(),
Cache: &SecretCache{
VirtualCenter: make(map[string]*Credential),
},
}
cleanupSecretCredentialManager := func() {
secretCredentialManager.Cache.Secret = nil
for key := range secretCredentialManager.Cache.VirtualCenter {
delete(secretCredentialManager.Cache.VirtualCenter, key)
}
secrets, err := secretCredentialManager.SecretLister.List(labels.Everything())
if err != nil {
t.Fatal("Failed to get all secrets from sharedInformer. error: ", err)
}
for _, secret := range secrets {
err := secretInformer.Informer().GetIndexer().Delete(secret)
if err != nil {
t.Fatalf("Failed to delete secret from informer: %v", err)
}
}
}
for _, test := range tests {
t.Logf("Executing Testcase: %s", test.testName)
for ntest, op := range test.ops {
switch op {
case addSecretOp:
expected := test.expectedValues[ntest].(OpSecretTest)
t.Logf("Adding secret: %s", expected.secret)
err := secretInformer.Informer().GetIndexer().Add(expected.secret)
if err != nil {
t.Fatalf("Failed to add secret to internal cache: %v", err)
}
case getCredentialsOp:
expected := test.expectedValues[ntest].(GetCredentialsTest)
credential, err := secretCredentialManager.GetCredential(expected.server)
t.Logf("Retrieving credentials for server %s", expected.server)
if err != expected.err {
t.Fatalf("Fail to get credentials with error: %v", err)
}
if expected.err == nil {
if expected.username != credential.User ||
expected.password != credential.Password {
t.Fatalf("Received credentials %v "+
"are different than actual credential user:%s password:%s", credential, expected.username,
expected.password)
}
}
case deleteSecretOp:
expected := test.expectedValues[ntest].(OpSecretTest)
t.Logf("Deleting secret: %s", expected.secret)
err := secretInformer.Informer().GetIndexer().Delete(expected.secret)
if err != nil {
t.Fatalf("Failed to delete secret to internal cache: %v", err)
}
}
}
cleanupSecretCredentialManager()
}
}
func TestParseSecretConfig(t *testing.T) {
var (
testUsername = "Admin"
testPassword = "Password"
testIP = "10.20.30.40"
)
var testcases = []struct {
testName string
data map[string][]byte
config map[string]*Credential
expectedError error
}{
{
testName: "Valid username and password",
data: map[string][]byte{
"10.20.30.40.username": []byte(testUsername),
"10.20.30.40.password": []byte(testPassword),
},
config: map[string]*Credential{
testIP: {
User: testUsername,
Password: testPassword,
},
},
expectedError: nil,
},
{
testName: "Invalid username key with valid password key",
data: map[string][]byte{
"10.20.30.40.usernam": []byte(testUsername),
"10.20.30.40.password": []byte(testPassword),
},
config: nil,
expectedError: ErrUnknownSecretKey,
},
{
testName: "Missing username",
data: map[string][]byte{
"10.20.30.40.password": []byte(testPassword),
},
config: map[string]*Credential{
testIP: {
Password: testPassword,
},
},
expectedError: ErrCredentialMissing,
},
{
testName: "Missing password",
data: map[string][]byte{
"10.20.30.40.username": []byte(testUsername),
},
config: map[string]*Credential{
testIP: {
User: testUsername,
},
},
expectedError: ErrCredentialMissing,
},
{
testName: "IP with unknown key",
data: map[string][]byte{
"10.20.30.40": []byte(testUsername),
},
config: nil,
expectedError: ErrUnknownSecretKey,
},
}
resultConfig := make(map[string]*Credential)
cleanupResultConfig := func(config map[string]*Credential) {
for k := range config {
delete(config, k)
}
}
for _, testcase := range testcases {
err := parseConfig(testcase.data, resultConfig)
t.Logf("Executing Testcase: %s", testcase.testName)
if err != testcase.expectedError {
t.Fatalf("Parsing Secret failed for data %+v: %s", testcase.data, err)
}
if testcase.config != nil && !reflect.DeepEqual(testcase.config, resultConfig) {
t.Fatalf("Parsing Secret failed for data %+v expected config %+v and actual config %+v",
testcase.data, resultConfig, testcase.config)
}
cleanupResultConfig(resultConfig)
}
}

View File

@@ -1,478 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"fmt"
"strings"
"sync"
"github.com/vmware/govmomi/object"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
// Stores info about the kubernetes node
type NodeInfo struct {
dataCenter *vclib.Datacenter
vm *vclib.VirtualMachine
vcServer string
vmUUID string
zone *cloudprovider.Zone
}
type NodeManager struct {
// TODO: replace map with concurrent map when k8s supports go v1.9
// Maps the VC server to VSphereInstance
vsphereInstanceMap map[string]*VSphereInstance
// Maps node name to node info.
nodeInfoMap map[string]*NodeInfo
// Maps node name to node structure
registeredNodes map[string]*v1.Node
//CredentialsManager
credentialManager *SecretCredentialManager
// Mutexes
registeredNodesLock sync.RWMutex
nodeInfoLock sync.RWMutex
credentialManagerLock sync.Mutex
}
type NodeDetails struct {
NodeName string
vm *vclib.VirtualMachine
VMUUID string
Zone *cloudprovider.Zone
}
// TODO: Make it configurable in vsphere.conf
const (
POOL_SIZE = 8
QUEUE_SIZE = POOL_SIZE * 10
)
func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
type VmSearch struct {
vc string
datacenter *vclib.Datacenter
}
var mutex = &sync.Mutex{}
var globalErrMutex = &sync.Mutex{}
var queueChannel chan *VmSearch
var wg sync.WaitGroup
var globalErr *error
queueChannel = make(chan *VmSearch, QUEUE_SIZE)
nodeUUID, err := GetNodeUUID(node)
if err != nil {
klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err)
return err
}
klog.V(4).Infof("Discovering node %s with uuid %s", node.ObjectMeta.Name, nodeUUID)
vmFound := false
globalErr = nil
setGlobalErr := func(err error) {
globalErrMutex.Lock()
globalErr = &err
globalErrMutex.Unlock()
}
setVMFound := func(found bool) {
mutex.Lock()
vmFound = found
mutex.Unlock()
}
getVMFound := func() bool {
mutex.Lock()
found := vmFound
mutex.Unlock()
return found
}
go func() {
var datacenterObjs []*vclib.Datacenter
for vc, vsi := range nm.vsphereInstanceMap {
found := getVMFound()
if found == true {
break
}
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
err := nm.vcConnect(ctx, vsi)
if err != nil {
klog.V(4).Info("Discovering node error vc:", err)
setGlobalErr(err)
continue
}
if vsi.cfg.Datacenters == "" {
datacenterObjs, err = vclib.GetAllDatacenter(ctx, vsi.conn)
if err != nil {
klog.V(4).Info("Discovering node error dc:", err)
setGlobalErr(err)
continue
}
} else {
datacenters := strings.Split(vsi.cfg.Datacenters, ",")
for _, dc := range datacenters {
dc = strings.TrimSpace(dc)
if dc == "" {
continue
}
datacenterObj, err := vclib.GetDatacenter(ctx, vsi.conn, dc)
if err != nil {
klog.V(4).Info("Discovering node error dc:", err)
setGlobalErr(err)
continue
}
datacenterObjs = append(datacenterObjs, datacenterObj)
}
}
for _, datacenterObj := range datacenterObjs {
found := getVMFound()
if found == true {
break
}
klog.V(4).Infof("Finding node %s in vc=%s and datacenter=%s", node.Name, vc, datacenterObj.Name())
queueChannel <- &VmSearch{
vc: vc,
datacenter: datacenterObj,
}
}
}
close(queueChannel)
}()
for i := 0; i < POOL_SIZE; i++ {
go func() {
for res := range queueChannel {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vm, err := res.datacenter.GetVMByUUID(ctx, nodeUUID)
if err != nil {
klog.V(4).Infof("Error while looking for vm=%+v in vc=%s and datacenter=%s: %v",
vm, res.vc, res.datacenter.Name(), err)
if err != vclib.ErrNoVMFound {
setGlobalErr(err)
} else {
klog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s",
node.Name, res.vc, res.datacenter.Name())
}
continue
}
if vm != nil {
klog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s",
node.Name, vm, res.vc, res.datacenter.Name())
// Get the node zone information
nodeFd := node.ObjectMeta.Labels[v1.LabelZoneFailureDomain]
nodeRegion := node.ObjectMeta.Labels[v1.LabelZoneRegion]
nodeZone := &cloudprovider.Zone{FailureDomain: nodeFd, Region: nodeRegion}
nodeInfo := &NodeInfo{dataCenter: res.datacenter, vm: vm, vcServer: res.vc, vmUUID: nodeUUID, zone: nodeZone}
nm.addNodeInfo(node.ObjectMeta.Name, nodeInfo)
for range queueChannel {
}
setVMFound(true)
break
}
}
wg.Done()
}()
wg.Add(1)
}
wg.Wait()
if vmFound {
return nil
}
if globalErr != nil {
return *globalErr
}
klog.V(4).Infof("Discovery Node: %q vm not found", node.Name)
return vclib.ErrNoVMFound
}
func (nm *NodeManager) RegisterNode(node *v1.Node) error {
nm.addNode(node)
return nm.DiscoverNode(node)
}
func (nm *NodeManager) UnRegisterNode(node *v1.Node) error {
nm.removeNode(node)
return nil
}
func (nm *NodeManager) RediscoverNode(nodeName k8stypes.NodeName) error {
node, err := nm.GetNode(nodeName)
if err != nil {
return err
}
return nm.DiscoverNode(&node)
}
func (nm *NodeManager) GetNode(nodeName k8stypes.NodeName) (v1.Node, error) {
nm.registeredNodesLock.RLock()
node := nm.registeredNodes[convertToString(nodeName)]
nm.registeredNodesLock.RUnlock()
if node == nil {
return v1.Node{}, vclib.ErrNoVMFound
}
return *node, nil
}
func (nm *NodeManager) addNode(node *v1.Node) {
nm.registeredNodesLock.Lock()
nm.registeredNodes[node.ObjectMeta.Name] = node
nm.registeredNodesLock.Unlock()
}
func (nm *NodeManager) removeNode(node *v1.Node) {
nm.registeredNodesLock.Lock()
delete(nm.registeredNodes, node.ObjectMeta.Name)
nm.registeredNodesLock.Unlock()
nm.nodeInfoLock.Lock()
delete(nm.nodeInfoMap, node.ObjectMeta.Name)
nm.nodeInfoLock.Unlock()
}
// GetNodeInfo returns a NodeInfo containing the datacenter, VM and vCenter server IP address.
// This method returns an error if it is unable to find the node in the VCs and DCs listed in vsphere.conf.
// NodeInfo returned may not be updated to reflect current VM location.
//
// This method is a getter but it can cause side-effect of updating NodeInfo object.
func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error) {
getNodeInfo := func(nodeName k8stypes.NodeName) *NodeInfo {
nm.nodeInfoLock.RLock()
nodeInfo := nm.nodeInfoMap[convertToString(nodeName)]
nm.nodeInfoLock.RUnlock()
return nodeInfo
}
nodeInfo := getNodeInfo(nodeName)
var err error
if nodeInfo == nil {
// Rediscover node if no NodeInfo found.
klog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", convertToString(nodeName))
err = nm.RediscoverNode(nodeName)
if err != nil {
klog.Errorf("Error %q node info for node %q not found", err, convertToString(nodeName))
return NodeInfo{}, err
}
nodeInfo = getNodeInfo(nodeName)
} else {
// Renew the found NodeInfo to avoid stale vSphere connection.
klog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, convertToString(nodeName))
nodeInfo, err = nm.renewNodeInfo(nodeInfo, true)
if err != nil {
klog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, convertToString(nodeName))
return NodeInfo{}, err
}
nm.addNodeInfo(convertToString(nodeName), nodeInfo)
}
return *nodeInfo, nil
}
// GetNodeDetails returns NodeDetails for all the discovered nodes.
//
// This method is a getter but it can cause side-effect of updating NodeInfo objects.
func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) {
nm.registeredNodesLock.Lock()
defer nm.registeredNodesLock.Unlock()
var nodeDetails []NodeDetails
for nodeName, nodeObj := range nm.registeredNodes {
nodeInfo, err := nm.GetNodeInfoWithNodeObject(nodeObj)
if err != nil {
return nil, err
}
klog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName)
nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm, nodeInfo.vmUUID, nodeInfo.zone})
}
return nodeDetails, nil
}
func (nm *NodeManager) addNodeInfo(nodeName string, nodeInfo *NodeInfo) {
nm.nodeInfoLock.Lock()
nm.nodeInfoMap[nodeName] = nodeInfo
nm.nodeInfoLock.Unlock()
}
func (nm *NodeManager) GetVSphereInstance(nodeName k8stypes.NodeName) (VSphereInstance, error) {
nodeInfo, err := nm.GetNodeInfo(nodeName)
if err != nil {
klog.V(4).Infof("node info for node %q not found", convertToString(nodeName))
return VSphereInstance{}, err
}
vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer]
if vsphereInstance == nil {
return VSphereInstance{}, fmt.Errorf("vSphereInstance for vc server %q not found while looking for node %q", nodeInfo.vcServer, convertToString(nodeName))
}
return *vsphereInstance, nil
}
// renewNodeInfo renews vSphere connection, VirtualMachine and Datacenter for NodeInfo instance.
func (nm *NodeManager) renewNodeInfo(nodeInfo *NodeInfo, reconnect bool) (*NodeInfo, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer]
if vsphereInstance == nil {
err := fmt.Errorf("vSphereInstance for vSphere %q not found while refershing NodeInfo for VM %q", nodeInfo.vcServer, nodeInfo.vm)
return nil, err
}
if reconnect {
err := nm.vcConnect(ctx, vsphereInstance)
if err != nil {
return nil, err
}
}
vm := nodeInfo.vm.RenewVM(vsphereInstance.conn.Client)
return &NodeInfo{
vm: &vm,
dataCenter: vm.Datacenter,
vcServer: nodeInfo.vcServer,
vmUUID: nodeInfo.vmUUID,
zone: nodeInfo.zone,
}, nil
}
func (nodeInfo *NodeInfo) VM() *vclib.VirtualMachine {
if nodeInfo == nil {
return nil
}
return nodeInfo.vm
}
// vcConnect connects to vCenter with existing credentials
// If credentials are invalid:
// 1. It will fetch credentials from credentialManager
// 2. Update the credentials
// 3. Connects again to vCenter with fetched credentials
func (nm *NodeManager) vcConnect(ctx context.Context, vsphereInstance *VSphereInstance) error {
err := vsphereInstance.conn.Connect(ctx)
if err == nil {
return nil
}
credentialManager := nm.CredentialManager()
if !vclib.IsInvalidCredentialsError(err) || credentialManager == nil {
klog.Errorf("Cannot connect to vCenter with err: %v", err)
return err
}
klog.V(4).Infof("Invalid credentials. Cannot connect to server %q. Fetching credentials from secrets.", vsphereInstance.conn.Hostname)
// Get latest credentials from SecretCredentialManager
credentials, err := credentialManager.GetCredential(vsphereInstance.conn.Hostname)
if err != nil {
klog.Errorf("Failed to get credentials from Secret Credential Manager with err: %v", err)
return err
}
vsphereInstance.conn.UpdateCredentials(credentials.User, credentials.Password)
return vsphereInstance.conn.Connect(ctx)
}
// GetNodeInfoWithNodeObject returns a NodeInfo containing the datacenter, VM and vCenter server IP address.
// This method returns an error if it is unable to find the node in the VCs and DCs listed in vsphere.conf.
// NodeInfo returned may not be updated to reflect current VM location.
//
// This method is a getter but it can cause side-effect of updating NodeInfo object.
func (nm *NodeManager) GetNodeInfoWithNodeObject(node *v1.Node) (NodeInfo, error) {
nodeName := node.Name
getNodeInfo := func(nodeName string) *NodeInfo {
nm.nodeInfoLock.RLock()
nodeInfo := nm.nodeInfoMap[nodeName]
nm.nodeInfoLock.RUnlock()
return nodeInfo
}
nodeInfo := getNodeInfo(nodeName)
var err error
if nodeInfo == nil {
// Rediscover node if no NodeInfo found.
klog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", nodeName)
err = nm.DiscoverNode(node)
if err != nil {
klog.Errorf("Error %q node info for node %q not found", err, nodeName)
return NodeInfo{}, err
}
nodeInfo = getNodeInfo(nodeName)
} else {
// Renew the found NodeInfo to avoid stale vSphere connection.
klog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, nodeName)
nodeInfo, err = nm.renewNodeInfo(nodeInfo, true)
if err != nil {
klog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, nodeName)
return NodeInfo{}, err
}
nm.addNodeInfo(nodeName, nodeInfo)
}
return *nodeInfo, nil
}
func (nm *NodeManager) CredentialManager() *SecretCredentialManager {
nm.credentialManagerLock.Lock()
defer nm.credentialManagerLock.Unlock()
return nm.credentialManager
}
func (nm *NodeManager) UpdateCredentialManager(credentialManager *SecretCredentialManager) {
nm.credentialManagerLock.Lock()
defer nm.credentialManagerLock.Unlock()
nm.credentialManager = credentialManager
}
func (nm *NodeManager) GetHostsInZone(ctx context.Context, zoneFailureDomain string) ([]*object.HostSystem, error) {
klog.V(9).Infof("GetHostsInZone called with registeredNodes: %v", nm.registeredNodes)
nodeDetails, err := nm.GetNodeDetails()
if err != nil {
return nil, err
}
klog.V(4).Infof("Node Details: %v", nodeDetails)
// Return those hosts that are in the given zone.
hosts := make([]*object.HostSystem, 0)
for _, n := range nodeDetails {
// Match the provided zone failure domain with the node.
klog.V(9).Infof("Matching provided zone %s with node %s zone %s", zoneFailureDomain, n.NodeName, n.Zone.FailureDomain)
if zoneFailureDomain == n.Zone.FailureDomain {
host, err := n.vm.HostSystem(ctx)
if err != nil {
klog.Errorf("Failed to get host system for VM %s. err: %+v", n.vm, err)
continue
}
hosts = append(hosts, host)
}
}
klog.V(4).Infof("GetHostsInZone %v returning: %v", zoneFailureDomain, hosts)
return hosts, nil
}
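DiscoverNode above fans the VM-by-UUID search out over a bounded worker pool: one goroutine enqueues (vCenter, datacenter) pairs onto a buffered channel, POOL_SIZE workers drain it, and the first worker that finds the VM records the result and drains the remaining work so the others exit. A stripped-down, generic sketch of that pattern with placeholder data (illustrative only, not part of this commit):

package main

import (
	"fmt"
	"sync"
)

const poolSize = 8 // mirrors POOL_SIZE above; the queue is sized POOL_SIZE * 10

func main() {
	queue := make(chan string, poolSize*10)

	// Producer: enqueue every datacenter to search, then close the channel.
	go func() {
		for _, dc := range []string{"dc-1", "dc-2", "dc-3"} {
			queue <- dc
		}
		close(queue)
	}()

	var (
		wg    sync.WaitGroup
		mu    sync.Mutex
		found string
	)
	for i := 0; i < poolSize; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for dc := range queue {
				if dc == "dc-2" { // stand-in for GetVMByUUID returning a VM
					mu.Lock()
					found = dc
					mu.Unlock()
					// Drain the queue so the producer and other workers finish.
					for range queue {
					}
					return
				}
			}
		}()
	}
	wg.Wait()
	fmt.Println("vm found in:", found)
}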

View File

@@ -1,78 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"connection.go",
"constants.go",
"custom_errors.go",
"datacenter.go",
"datastore.go",
"folder.go",
"pbm.go",
"utils.go",
"virtualmachine.go",
"vmoptions.go",
"volumeoptions.go",
"vsphere_metrics.go",
],
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib",
deps = [
"//staging/src/k8s.io/client-go/pkg/version:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/pbm:go_default_library",
"//vendor/github.com/vmware/govmomi/pbm/types:go_default_library",
"//vendor/github.com/vmware/govmomi/property:go_default_library",
"//vendor/github.com/vmware/govmomi/session:go_default_library",
"//vendor/github.com/vmware/govmomi/sts:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:all-srcs",
"//pkg/cloudprovider/providers/vsphere/vclib/fixtures:all-srcs",
],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = [
"connection_test.go",
"datacenter_test.go",
"datastore_test.go",
"folder_test.go",
"utils_test.go",
"virtualmachine_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/cloudprovider/providers/vsphere/vclib/fixtures:go_default_library",
"//vendor/github.com/vmware/govmomi:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/simulator:go_default_library",
],
)

View File

@@ -1,227 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"crypto/tls"
"encoding/pem"
"fmt"
"net"
neturl "net/url"
"sync"
"github.com/vmware/govmomi/session"
"github.com/vmware/govmomi/sts"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/soap"
"k8s.io/client-go/pkg/version"
"k8s.io/klog"
)
// VSphereConnection contains information for connecting to vCenter
type VSphereConnection struct {
Client *vim25.Client
Username string
Password string
Hostname string
Port string
CACert string
Thumbprint string
Insecure bool
RoundTripperCount uint
credentialsLock sync.Mutex
}
var (
clientLock sync.Mutex
)
// Connect makes connection to vCenter and sets VSphereConnection.Client.
// If connection.Client is already set, it obtains the existing user session.
// if user session is not valid, connection.Client will be set to the new client.
func (connection *VSphereConnection) Connect(ctx context.Context) error {
var err error
clientLock.Lock()
defer clientLock.Unlock()
if connection.Client == nil {
connection.Client, err = connection.NewClient(ctx)
if err != nil {
klog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
}
m := session.NewManager(connection.Client)
userSession, err := m.UserSession(ctx)
if err != nil {
klog.Errorf("Error while obtaining user session. err: %+v", err)
return err
}
if userSession != nil {
return nil
}
klog.Warningf("Creating new client session since the existing session is not valid or not authenticated")
connection.Client, err = connection.NewClient(ctx)
if err != nil {
klog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
}
// Signer returns an sts.Signer for use with SAML token auth if connection is configured for such.
// Returns nil if username/password auth is configured for the connection.
func (connection *VSphereConnection) Signer(ctx context.Context, client *vim25.Client) (*sts.Signer, error) {
// TODO: Add separate fields for certificate and private-key.
// For now we can leave the config structs and validation as-is and
// decide to use LoginByToken if the username value is PEM encoded.
b, _ := pem.Decode([]byte(connection.Username))
if b == nil {
return nil, nil
}
cert, err := tls.X509KeyPair([]byte(connection.Username), []byte(connection.Password))
if err != nil {
klog.Errorf("Failed to load X509 key pair. err: %+v", err)
return nil, err
}
tokens, err := sts.NewClient(ctx, client)
if err != nil {
klog.Errorf("Failed to create STS client. err: %+v", err)
return nil, err
}
req := sts.TokenRequest{
Certificate: &cert,
}
signer, err := tokens.Issue(ctx, req)
if err != nil {
klog.Errorf("Failed to issue SAML token. err: %+v", err)
return nil, err
}
return signer, nil
}
// login calls SessionManager.LoginByToken if certificate and private key are configured,
// otherwise calls SessionManager.Login with user and password.
func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Client) error {
m := session.NewManager(client)
connection.credentialsLock.Lock()
defer connection.credentialsLock.Unlock()
signer, err := connection.Signer(ctx, client)
if err != nil {
return err
}
if signer == nil {
klog.V(3).Infof("SessionManager.Login with username %q", connection.Username)
return m.Login(ctx, neturl.UserPassword(connection.Username, connection.Password))
}
klog.V(3).Infof("SessionManager.LoginByToken with certificate %q", connection.Username)
header := soap.Header{Security: signer}
return m.LoginByToken(client.WithHeader(ctx, header))
}
// Logout calls SessionManager.Logout for the given connection.
func (connection *VSphereConnection) Logout(ctx context.Context) {
clientLock.Lock()
c := connection.Client
clientLock.Unlock()
if c == nil {
return
}
m := session.NewManager(c)
hasActiveSession, err := m.SessionIsActive(ctx)
if err != nil {
klog.Errorf("Logout failed: %s", err)
return
}
if !hasActiveSession {
klog.Errorf("No active session, cannot logout")
return
}
if err := m.Logout(ctx); err != nil {
klog.Errorf("Logout failed: %s", err)
}
}
// NewClient creates a new govmomi client for the VSphereConnection obj
func (connection *VSphereConnection) NewClient(ctx context.Context) (*vim25.Client, error) {
url, err := soap.ParseURL(net.JoinHostPort(connection.Hostname, connection.Port))
if err != nil {
klog.Errorf("Failed to parse URL: %s. err: %+v", url, err)
return nil, err
}
sc := soap.NewClient(url, connection.Insecure)
if ca := connection.CACert; ca != "" {
if err := sc.SetRootCAs(ca); err != nil {
return nil, err
}
}
tpHost := connection.Hostname + ":" + connection.Port
sc.SetThumbprint(tpHost, connection.Thumbprint)
client, err := vim25.NewClient(ctx, sc)
if err != nil {
klog.Errorf("Failed to create new client. err: %+v", err)
return nil, err
}
k8sVersion := version.Get().GitVersion
client.UserAgent = fmt.Sprintf("kubernetes-cloudprovider/%s", k8sVersion)
err = connection.login(ctx, client)
if err != nil {
return nil, err
}
if klog.V(3) {
s, err := session.NewManager(client).UserSession(ctx)
if err == nil {
klog.Infof("New session ID for '%s' = %s", s.UserName, s.Key)
}
}
if connection.RoundTripperCount == 0 {
connection.RoundTripperCount = RoundTripperDefaultCount
}
client.RoundTripper = vim25.Retry(client.RoundTripper, vim25.TemporaryNetworkError(int(connection.RoundTripperCount)))
return client, nil
}
// UpdateCredentials updates username and password.
// Note: Updated username and password will be used when there is no session active
func (connection *VSphereConnection) UpdateCredentials(username string, password string) {
connection.credentialsLock.Lock()
defer connection.credentialsLock.Unlock()
connection.Username = username
connection.Password = password
}
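A minimal usage sketch for VSphereConnection as defined above: pin the server certificate via either CACert or Thumbprint, then Connect; Logout tears the session down. Hostname, port, credentials and the fingerprint are placeholders, and the import path shown is the pre-move one used by the test file below (this commit relocates the package under k8s.io/legacy-cloud-providers/vsphere/vclib). Illustrative only, not part of this commit:

package main

import (
	"context"
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

func main() {
	conn := &vclib.VSphereConnection{
		Hostname:   "vcenter.example.com",         // placeholder
		Port:       "443",
		Username:   "administrator@vsphere.local", // placeholder
		Password:   "secret",                      // placeholder
		Thumbprint: "<sha1-fingerprint>",          // or set CACert instead
	}

	ctx := context.Background()
	if err := conn.Connect(ctx); err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	defer conn.Logout(ctx)
	fmt.Println("connected to", conn.Hostname)
}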

View File

@@ -1,222 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib_test
import (
"context"
"crypto/sha1"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strings"
"testing"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/fixtures"
)
func createTestServer(
t *testing.T,
caCertPath string,
serverCertPath string,
serverKeyPath string,
handler http.HandlerFunc,
) (*httptest.Server, string) {
caCertPEM, err := ioutil.ReadFile(caCertPath)
if err != nil {
t.Fatalf("Could not read ca cert from file")
}
serverCert, err := tls.LoadX509KeyPair(serverCertPath, serverKeyPath)
if err != nil {
t.Fatalf("Could not load server cert and server key from files: %#v", err)
}
certPool := x509.NewCertPool()
if ok := certPool.AppendCertsFromPEM(caCertPEM); !ok {
t.Fatalf("Cannot add CA to CAPool")
}
server := httptest.NewUnstartedServer(http.HandlerFunc(handler))
server.TLS = &tls.Config{
Certificates: []tls.Certificate{
serverCert,
},
RootCAs: certPool,
}
// calculate the leaf certificate's fingerprint
if len(server.TLS.Certificates) < 1 || len(server.TLS.Certificates[0].Certificate) < 1 {
t.Fatal("Expected server.TLS.Certificates not to be empty")
}
x509LeafCert := server.TLS.Certificates[0].Certificate[0]
var tpString string
for i, b := range sha1.Sum(x509LeafCert) {
if i > 0 {
tpString += ":"
}
tpString += fmt.Sprintf("%02X", b)
}
return server, tpString
}
func TestWithValidCaCert(t *testing.T) {
handler, verifyConnectionWasMade := getRequestVerifier(t)
server, _ := createTestServer(t, fixtures.CaCertPath, fixtures.ServerCertPath, fixtures.ServerKeyPath, handler)
server.StartTLS()
u := mustParseURL(t, server.URL)
connection := &vclib.VSphereConnection{
Hostname: u.Hostname(),
Port: u.Port(),
CACert: fixtures.CaCertPath,
}
// Ignoring error here, because we only care about the TLS connection
_, _ = connection.NewClient(context.Background())
verifyConnectionWasMade()
}
func TestWithVerificationWithWrongThumbprint(t *testing.T) {
handler, _ := getRequestVerifier(t)
server, _ := createTestServer(t, fixtures.CaCertPath, fixtures.ServerCertPath, fixtures.ServerKeyPath, handler)
server.StartTLS()
u := mustParseURL(t, server.URL)
connection := &vclib.VSphereConnection{
Hostname: u.Hostname(),
Port: u.Port(),
Thumbprint: "obviously wrong",
}
_, err := connection.NewClient(context.Background())
if msg := err.Error(); !strings.Contains(msg, "thumbprint does not match") {
t.Fatalf("Expected wrong thumbprint error, got '%s'", msg)
}
}
func TestWithVerificationWithoutCaCertOrThumbprint(t *testing.T) {
handler, _ := getRequestVerifier(t)
server, _ := createTestServer(t, fixtures.CaCertPath, fixtures.ServerCertPath, fixtures.ServerKeyPath, handler)
server.StartTLS()
u := mustParseURL(t, server.URL)
connection := &vclib.VSphereConnection{
Hostname: u.Hostname(),
Port: u.Port(),
}
_, err := connection.NewClient(context.Background())
verifyWrappedX509UnkownAuthorityErr(t, err)
}
func TestWithValidThumbprint(t *testing.T) {
handler, verifyConnectionWasMade := getRequestVerifier(t)
server, thumbprint :=
createTestServer(t, fixtures.CaCertPath, fixtures.ServerCertPath, fixtures.ServerKeyPath, handler)
server.StartTLS()
u := mustParseURL(t, server.URL)
connection := &vclib.VSphereConnection{
Hostname: u.Hostname(),
Port: u.Port(),
Thumbprint: thumbprint,
}
// Ignoring error here, because we only care about the TLS connection
_, _ = connection.NewClient(context.Background())
verifyConnectionWasMade()
}
func TestWithInvalidCaCertPath(t *testing.T) {
connection := &vclib.VSphereConnection{
Hostname: "should-not-matter",
Port: "should-not-matter",
CACert: "invalid-path",
}
_, err := connection.NewClient(context.Background())
if _, ok := err.(*os.PathError); !ok {
t.Fatalf("Expected an os.PathError, got: '%s' (%#v)", err.Error(), err)
}
}
func TestInvalidCaCert(t *testing.T) {
connection := &vclib.VSphereConnection{
Hostname: "should-not-matter",
Port: "should-not-matter",
CACert: fixtures.InvalidCertPath,
}
_, err := connection.NewClient(context.Background())
if msg := err.Error(); !strings.Contains(msg, "invalid certificate") {
t.Fatalf("Expected invalid certificate error, got '%s'", msg)
}
}
func verifyWrappedX509UnkownAuthorityErr(t *testing.T, err error) {
urlErr, ok := err.(*url.Error)
if !ok {
t.Fatalf("Expected to receive an url.Error, got '%s' (%#v)", err.Error(), err)
}
x509Err, ok := urlErr.Err.(x509.UnknownAuthorityError)
if !ok {
t.Fatalf("Expected to receive a wrapped x509.UnknownAuthorityError, got: '%s' (%#v)", urlErr.Error(), urlErr)
}
if msg := x509Err.Error(); msg != "x509: certificate signed by unknown authority" {
t.Fatalf("Expected 'signed by unknown authority' error, got: '%s'", msg)
}
}
func getRequestVerifier(t *testing.T) (http.HandlerFunc, func()) {
gotRequest := false
handler := func(w http.ResponseWriter, r *http.Request) {
gotRequest = true
}
checker := func() {
if !gotRequest {
t.Fatalf("Never saw a request, maybe TLS connection could not be established?")
}
}
return handler, checker
}
func mustParseURL(t *testing.T, i string) *url.URL {
u, err := url.Parse(i)
if err != nil {
t.Fatalf("Cannot parse URL: %v", err)
}
return u
}

View File

@@ -1,63 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
// Volume Constants
const (
ThinDiskType = "thin"
PreallocatedDiskType = "preallocated"
EagerZeroedThickDiskType = "eagerZeroedThick"
ZeroedThickDiskType = "zeroedThick"
)
// Controller Constants
const (
SCSIControllerLimit = 4
SCSIControllerDeviceLimit = 15
SCSIDeviceSlots = 16
SCSIReservedSlot = 7
SCSIControllerType = "scsi"
LSILogicControllerType = "lsiLogic"
BusLogicControllerType = "busLogic"
LSILogicSASControllerType = "lsiLogic-sas"
PVSCSIControllerType = "pvscsi"
)
// Other Constants
const (
LogLevel = 4
DatastoreProperty = "datastore"
ResourcePoolProperty = "resourcePool"
DatastoreInfoProperty = "info"
VirtualMachineType = "VirtualMachine"
RoundTripperDefaultCount = 3
VSANDatastoreType = "vsan"
DummyVMPrefixName = "vsphere-k8s"
ActivePowerState = "poweredOn"
DatacenterType = "Datacenter"
ClusterComputeResourceType = "ClusterComputeResource"
HostSystemType = "HostSystem"
)
// Test Constants
const (
TestDefaultDatacenter = "DC0"
TestDefaultDatastore = "LocalDS_0"
TestDefaultNetwork = "VM Network"
testNameNotFound = "enoent"
)

View File

@@ -1,39 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import "errors"
// Error Messages
const (
FileAlreadyExistErrMsg = "File requested already exist"
NoDiskUUIDFoundErrMsg = "No disk UUID found"
NoDevicesFoundErrMsg = "No devices found"
DiskNotFoundErrMsg = "No vSphere disk ID found"
InvalidVolumeOptionsErrMsg = "VolumeOptions verification failed"
NoVMFoundErrMsg = "No VM found"
)
// Error constants
var (
ErrFileAlreadyExist = errors.New(FileAlreadyExistErrMsg)
ErrNoDiskUUIDFound = errors.New(NoDiskUUIDFoundErrMsg)
ErrNoDevicesFound = errors.New(NoDevicesFoundErrMsg)
ErrNoDiskIDFound = errors.New(DiskNotFoundErrMsg)
ErrInvalidVolumeOptions = errors.New(InvalidVolumeOptionsErrMsg)
ErrNoVMFound = errors.New(NoVMFoundErrMsg)
)
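Callers branch on these sentinel errors by direct comparison; for example, DiscoverNode in nodemanager.go treats vclib.ErrNoVMFound as "not in this datacenter, keep searching" rather than a hard failure. A minimal sketch of that consumption pattern (illustrative only, not part of this commit; the import path is the pre-move one):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// classify maps a vclib error to the action a caller would take.
func classify(err error) string {
	switch err {
	case nil:
		return "ok"
	case vclib.ErrNoVMFound:
		return "vm not found here, try the next datacenter"
	default:
		return "unexpected error: " + err.Error()
	}
}

func main() {
	fmt.Println(classify(vclib.ErrNoVMFound))
	fmt.Println(classify(nil))
}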

View File

@@ -1,348 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
)
// Datacenter extends the govmomi Datacenter object
type Datacenter struct {
*object.Datacenter
}
// GetDatacenter returns the DataCenter Object for the given datacenterPath
// If the datacenter is located in a folder, include the full path to the datacenter; otherwise just provide the datacenter name.
func GetDatacenter(ctx context.Context, connection *VSphereConnection, datacenterPath string) (*Datacenter, error) {
finder := find.NewFinder(connection.Client, false)
datacenter, err := finder.Datacenter(ctx, datacenterPath)
if err != nil {
klog.Errorf("Failed to find the datacenter: %s. err: %+v", datacenterPath, err)
return nil, err
}
dc := Datacenter{datacenter}
return &dc, nil
}
// GetAllDatacenter returns all the DataCenter Objects
func GetAllDatacenter(ctx context.Context, connection *VSphereConnection) ([]*Datacenter, error) {
var dc []*Datacenter
finder := find.NewFinder(connection.Client, false)
datacenters, err := finder.DatacenterList(ctx, "*")
if err != nil {
klog.Errorf("Failed to find the datacenter. err: %+v", err)
return nil, err
}
for _, datacenter := range datacenters {
dc = append(dc, &(Datacenter{datacenter}))
}
return dc, nil
}
// GetVMByUUID gets the VM object from the given vmUUID
func (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualMachine, error) {
s := object.NewSearchIndex(dc.Client())
vmUUID = strings.ToLower(strings.TrimSpace(vmUUID))
svm, err := s.FindByUuid(ctx, dc.Datacenter, vmUUID, true, nil)
if err != nil {
klog.Errorf("Failed to find VM by UUID. VM UUID: %s, err: %+v", vmUUID, err)
return nil, err
}
if svm == nil {
klog.Errorf("Unable to find VM by UUID. VM UUID: %s", vmUUID)
return nil, ErrNoVMFound
}
virtualMachine := VirtualMachine{object.NewVirtualMachine(dc.Client(), svm.Reference()), dc}
return &virtualMachine, nil
}
// GetHostByVMUUID gets the host object from the given vmUUID
func (dc *Datacenter) GetHostByVMUUID(ctx context.Context, vmUUID string) (*types.ManagedObjectReference, error) {
virtualMachine, err := dc.GetVMByUUID(ctx, vmUUID)
if err != nil {
return nil, err
}
var vmMo mo.VirtualMachine
pc := property.DefaultCollector(virtualMachine.Client())
err = pc.RetrieveOne(ctx, virtualMachine.Reference(), []string{"summary.runtime.host"}, &vmMo)
if err != nil {
klog.Errorf("Failed to retrive VM runtime host, err: %v", err)
return nil, err
}
host := vmMo.Summary.Runtime.Host
klog.Infof("%s host is %s", virtualMachine.Reference(), host)
return host, nil
}
// GetVMByPath gets the VM object from the given vmPath
// vmPath should be the full path to VM and not just the name
func (dc *Datacenter) GetVMByPath(ctx context.Context, vmPath string) (*VirtualMachine, error) {
finder := getFinder(dc)
vm, err := finder.VirtualMachine(ctx, vmPath)
if err != nil {
klog.Errorf("Failed to find VM by Path. VM Path: %s, err: %+v", vmPath, err)
return nil, err
}
virtualMachine := VirtualMachine{vm, dc}
return &virtualMachine, nil
}
// GetAllDatastores gets the datastore URL to DatastoreInfo map for all the datastores in
// the datacenter.
func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*DatastoreInfo, error) {
finder := getFinder(dc)
datastores, err := finder.DatastoreList(ctx, "*")
if err != nil {
klog.Errorf("Failed to get all the datastores. err: %+v", err)
return nil, err
}
var dsList []types.ManagedObjectReference
for _, ds := range datastores {
dsList = append(dsList, ds.Reference())
}
var dsMoList []mo.Datastore
pc := property.DefaultCollector(dc.Client())
properties := []string{DatastoreInfoProperty}
err = pc.Retrieve(ctx, dsList, properties, &dsMoList)
if err != nil {
klog.Errorf("Failed to get Datastore managed objects from datastore objects."+
" dsObjList: %+v, properties: %+v, err: %v", dsList, properties, err)
return nil, err
}
dsURLInfoMap := make(map[string]*DatastoreInfo)
for _, dsMo := range dsMoList {
dsURLInfoMap[dsMo.Info.GetDatastoreInfo().Url] = &DatastoreInfo{
&Datastore{object.NewDatastore(dc.Client(), dsMo.Reference()),
dc},
dsMo.Info.GetDatastoreInfo()}
}
klog.V(9).Infof("dsURLInfoMap : %+v", dsURLInfoMap)
return dsURLInfoMap, nil
}
// GetAllHosts returns all the host objects in this datacenter of VC
func (dc *Datacenter) GetAllHosts(ctx context.Context) ([]types.ManagedObjectReference, error) {
finder := getFinder(dc)
hostSystems, err := finder.HostSystemList(ctx, "*")
if err != nil {
klog.Errorf("Failed to get all hostSystems. err: %+v", err)
return nil, err
}
var hostMors []types.ManagedObjectReference
for _, hs := range hostSystems {
hostMors = append(hostMors, hs.Reference())
}
return hostMors, nil
}
// GetDatastoreByPath gets the Datastore object from the given vmDiskPath
func (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) (*Datastore, error) {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
klog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath)
return nil, errors.New("Failed to parse vmDiskPath")
}
return dc.GetDatastoreByName(ctx, datastorePathObj.Datastore)
}
// GetDatastoreByName gets the Datastore object for the given datastore name
func (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Datastore, error) {
finder := getFinder(dc)
ds, err := finder.Datastore(ctx, name)
if err != nil {
klog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err)
return nil, err
}
datastore := Datastore{ds, dc}
return &datastore, nil
}
// GetResourcePool gets the resource pool for the given path
func (dc *Datacenter) GetResourcePool(ctx context.Context, resourcePoolPath string) (*object.ResourcePool, error) {
finder := getFinder(dc)
var resourcePool *object.ResourcePool
var err error
resourcePool, err = finder.ResourcePoolOrDefault(ctx, resourcePoolPath)
if err != nil {
klog.Errorf("Failed to get the ResourcePool for path '%s'. err: %+v", resourcePoolPath, err)
return nil, err
}
return resourcePool, nil
}
// GetFolderByPath gets the Folder Object from the given folder path
// folderPath should be the full path to folder
func (dc *Datacenter) GetFolderByPath(ctx context.Context, folderPath string) (*Folder, error) {
finder := getFinder(dc)
vmFolder, err := finder.Folder(ctx, folderPath)
if err != nil {
klog.Errorf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
return nil, err
}
folder := Folder{vmFolder, dc}
return &folder, nil
}
// GetVMMoList gets the VM Managed Objects with the given properties from the VM object
func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachine, properties []string) ([]mo.VirtualMachine, error) {
var vmMoList []mo.VirtualMachine
var vmRefs []types.ManagedObjectReference
if len(vmObjList) < 1 {
klog.Errorf("VirtualMachine Object list is empty")
return nil, fmt.Errorf("VirtualMachine Object list is empty")
}
for _, vmObj := range vmObjList {
vmRefs = append(vmRefs, vmObj.Reference())
}
pc := property.DefaultCollector(dc.Client())
err := pc.Retrieve(ctx, vmRefs, properties, &vmMoList)
if err != nil {
klog.Errorf("Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v", vmObjList, properties, err)
return nil, err
}
return vmMoList, nil
}
// GetVirtualDiskPage83Data gets the virtual disk UUID by diskPath
func (dc *Datacenter) GetVirtualDiskPage83Data(ctx context.Context, diskPath string) (string, error) {
if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
diskPath += ".vmdk"
}
vdm := object.NewVirtualDiskManager(dc.Client())
// Returns uuid of vmdk virtual disk
diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc.Datacenter)
if err != nil {
klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
return "", err
}
diskUUID = formatVirtualDiskUUID(diskUUID)
return diskUUID, nil
}
// GetDatastoreMoList gets the Datastore Managed Objects with the given properties from the datastore objects
func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datastore, properties []string) ([]mo.Datastore, error) {
var dsMoList []mo.Datastore
var dsRefs []types.ManagedObjectReference
if len(dsObjList) < 1 {
klog.Errorf("Datastore Object list is empty")
return nil, fmt.Errorf("Datastore Object list is empty")
}
for _, dsObj := range dsObjList {
dsRefs = append(dsRefs, dsObj.Reference())
}
pc := property.DefaultCollector(dc.Client())
err := pc.Retrieve(ctx, dsRefs, properties, &dsMoList)
if err != nil {
klog.Errorf("Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v", dsObjList, properties, err)
return nil, err
}
return dsMoList, nil
}
// CheckDisksAttached checks if the disk is attached to node.
// This is done by comparing the volume path with the backing.FilePath on the VM Virtual disk devices.
func (dc *Datacenter) CheckDisksAttached(ctx context.Context, nodeVolumes map[string][]string) (map[string]map[string]bool, error) {
attached := make(map[string]map[string]bool)
var vmList []*VirtualMachine
for nodeName, volPaths := range nodeVolumes {
for _, volPath := range volPaths {
setNodeVolumeMap(attached, volPath, nodeName, false)
}
vm, err := dc.GetVMByPath(ctx, nodeName)
if err != nil {
if IsNotFound(err) {
klog.Warningf("Node %q does not exist, vSphere CP will assume disks %v are not attached to it.", nodeName, volPaths)
}
continue
}
vmList = append(vmList, vm)
}
if len(vmList) == 0 {
klog.V(2).Infof("vSphere CP will assume no disks are attached to any node.")
return attached, nil
}
vmMoList, err := dc.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name"})
if err != nil {
// When there is an error fetching instance information,
// it is safer to return nil so that the volume information is left untouched.
klog.Errorf("Failed to get VM Managed object for nodes: %+v. err: %+v", vmList, err)
return nil, err
}
for _, vmMo := range vmMoList {
if vmMo.Config == nil {
klog.Errorf("Config is not available for VM: %q", vmMo.Name)
continue
}
for nodeName, volPaths := range nodeVolumes {
if nodeName == vmMo.Name {
verifyVolumePathsForVM(vmMo, volPaths, attached)
}
}
}
return attached, nil
}
// verifyVolumePathsForVM verifies whether the volume paths (volPaths) are attached to the VM.
func verifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeVolumeMap map[string]map[string]bool) {
// Verify if the volume paths are present on the VM backing virtual disk devices
for _, volPath := range volPaths {
vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
if backing.FileName == volPath {
setNodeVolumeMap(nodeVolumeMap, volPath, vmMo.Name, true)
}
}
}
}
}
}
func setNodeVolumeMap(
nodeVolumeMap map[string]map[string]bool,
volumePath string,
nodeName string,
check bool) {
volumeMap := nodeVolumeMap[nodeName]
if volumeMap == nil {
volumeMap = make(map[string]bool)
nodeVolumeMap[nodeName] = volumeMap
}
volumeMap[volumePath] = check
}
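
For illustration, a minimal sketch of how a caller might consume the per-node attachment map that CheckDisksAttached returns. The helper name and the node/volume values are hypothetical, and the vclib import path is the pre-move one used elsewhere in this commit.

package example

import (
    "context"
    "fmt"

    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// listDetachedVolumes walks the map returned by Datacenter.CheckDisksAttached
// and collects the volume paths that are not attached to their node.
func listDetachedVolumes(ctx context.Context, dc *vclib.Datacenter, nodeVolumes map[string][]string) ([]string, error) {
    attached, err := dc.CheckDisksAttached(ctx, nodeVolumes)
    if err != nil {
        return nil, err
    }
    var detached []string
    for nodeName, volumes := range attached {
        for volPath, isAttached := range volumes {
            if !isAttached {
                detached = append(detached, fmt.Sprintf("%s (node %s)", volPath, nodeName))
            }
        }
    }
    return detached, nil
}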

View File

@@ -1,179 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"testing"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/simulator"
)
func TestDatacenter(t *testing.T) {
ctx := context.Background()
// vCenter model + initial set of objects (cluster, hosts, VMs, network, datastore, etc)
model := simulator.VPX()
defer model.Remove()
err := model.Create()
if err != nil {
t.Fatal(err)
}
s := model.Service.NewServer()
defer s.Close()
avm := simulator.Map.Any(VirtualMachineType).(*simulator.VirtualMachine)
c, err := govmomi.NewClient(ctx, s.URL, true)
if err != nil {
t.Fatal(err)
}
vc := &VSphereConnection{Client: c.Client}
_, err = GetDatacenter(ctx, vc, testNameNotFound)
if err == nil {
t.Error("expected error")
}
dc, err := GetDatacenter(ctx, vc, TestDefaultDatacenter)
if err != nil {
t.Error(err)
}
_, err = dc.GetVMByUUID(ctx, testNameNotFound)
if err == nil {
t.Error("expected error")
}
_, err = dc.GetVMByUUID(ctx, avm.Summary.Config.Uuid)
if err != nil {
t.Error(err)
}
_, err = dc.GetVMByPath(ctx, testNameNotFound)
if err == nil {
t.Error("expected error")
}
vm, err := dc.GetVMByPath(ctx, TestDefaultDatacenter+"/vm/"+avm.Name)
if err != nil {
t.Error(err)
}
_, err = dc.GetDatastoreByPath(ctx, testNameNotFound) // invalid format
if err == nil {
t.Error("expected error")
}
invalidPath := object.DatastorePath{
Datastore: testNameNotFound,
Path: testNameNotFound,
}
_, err = dc.GetDatastoreByPath(ctx, invalidPath.String())
if err == nil {
t.Error("expected error")
}
_, err = dc.GetDatastoreByPath(ctx, avm.Summary.Config.VmPathName)
if err != nil {
t.Error(err)
}
_, err = dc.GetDatastoreByName(ctx, testNameNotFound)
if err == nil {
t.Error("expected error")
}
ds, err := dc.GetDatastoreByName(ctx, TestDefaultDatastore)
if err != nil {
t.Error(err)
}
_, err = dc.GetFolderByPath(ctx, testNameNotFound)
if err == nil {
t.Error("expected error")
}
_, err = dc.GetFolderByPath(ctx, TestDefaultDatacenter+"/vm")
if err != nil {
t.Error(err)
}
_, err = dc.GetVMMoList(ctx, nil, nil)
if err == nil {
t.Error("expected error")
}
_, err = dc.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{testNameNotFound}) // invalid property
if err == nil {
t.Error("expected error")
}
_, err = dc.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary"})
if err != nil {
t.Error(err)
}
diskPath := ds.Path(avm.Name + "/disk1.vmdk")
_, err = dc.GetVirtualDiskPage83Data(ctx, diskPath+testNameNotFound)
if err == nil {
t.Error("expected error")
}
_, err = dc.GetVirtualDiskPage83Data(ctx, diskPath)
if err != nil {
t.Error(err)
}
_, err = dc.GetDatastoreMoList(ctx, nil, nil)
if err == nil {
t.Error("expected error")
}
_, err = dc.GetDatastoreMoList(ctx, []*Datastore{ds}, []string{testNameNotFound}) // invalid property
if err == nil {
t.Error("expected error")
}
_, err = dc.GetDatastoreMoList(ctx, []*Datastore{ds}, []string{DatastoreInfoProperty})
if err != nil {
t.Error(err)
}
nodeVolumes := map[string][]string{
avm.Name: {testNameNotFound, diskPath},
}
attached, err := dc.CheckDisksAttached(ctx, nodeVolumes)
if err != nil {
t.Error(err)
}
if attached[avm.Name][testNameNotFound] {
t.Error("should not be attached")
}
if !attached[avm.Name][diskPath] {
t.Errorf("%s should be attached", diskPath)
}
}

View File

@@ -1,103 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"fmt"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
)
// Datastore extends the govmomi Datastore object
type Datastore struct {
*object.Datastore
Datacenter *Datacenter
}
// DatastoreInfo is a structure to store the Datastore and its Info.
type DatastoreInfo struct {
*Datastore
Info *types.DatastoreInfo
}
func (di DatastoreInfo) String() string {
return fmt.Sprintf("Datastore: %+v, datastore URL: %s", di.Datastore, di.Info.Url)
}
// CreateDirectory creates the directory at location specified by directoryPath.
// If the intermediate level folders do not exist, and the parameter createParents is true, all the non-existent folders are created.
// directoryPath must be in the format "[vsanDatastore] kubevols"
func (ds *Datastore) CreateDirectory(ctx context.Context, directoryPath string, createParents bool) error {
fileManager := object.NewFileManager(ds.Client())
err := fileManager.MakeDirectory(ctx, directoryPath, ds.Datacenter.Datacenter, createParents)
if err != nil {
if soap.IsSoapFault(err) {
soapFault := soap.ToSoapFault(err)
if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {
return ErrFileAlreadyExist
}
}
return err
}
klog.V(LogLevel).Infof("Created dir with path as %+q", directoryPath)
return nil
}
// GetType returns the type of datastore
func (ds *Datastore) GetType(ctx context.Context) (string, error) {
var dsMo mo.Datastore
pc := property.DefaultCollector(ds.Client())
err := pc.RetrieveOne(ctx, ds.Datastore.Reference(), []string{"summary"}, &dsMo)
if err != nil {
klog.Errorf("Failed to retrieve datastore summary property. err: %v", err)
return "", err
}
return dsMo.Summary.Type, nil
}
// IsCompatibleWithStoragePolicy returns true if the datastore is compatible with the given storage policy, otherwise false.
// For a non-compatible datastore, the fault message is also returned.
func (ds *Datastore) IsCompatibleWithStoragePolicy(ctx context.Context, storagePolicyID string) (bool, string, error) {
pbmClient, err := NewPbmClient(ctx, ds.Client())
if err != nil {
klog.Errorf("Failed to get new PbmClient Object. err: %v", err)
return false, "", err
}
return pbmClient.IsDatastoreCompatible(ctx, storagePolicyID, ds)
}
// GetDatastoreHostMounts gets the hosts (as managed object references) on which the given datastore is mounted
func (ds *Datastore) GetDatastoreHostMounts(ctx context.Context) ([]types.ManagedObjectReference, error) {
var dsMo mo.Datastore
pc := property.DefaultCollector(ds.Client())
err := pc.RetrieveOne(ctx, ds.Datastore.Reference(), []string{"host"}, &dsMo)
if err != nil {
klog.Errorf("Failed to retrieve datastore host mount property. err: %v", err)
return nil, err
}
// Allocate capacity only; appending to a slice created with a non-zero length
// would leave zero-value ManagedObjectReference entries at the front of the result.
hosts := make([]types.ManagedObjectReference, 0, len(dsMo.Host))
for _, dsHostMount := range dsMo.Host {
hosts = append(hosts, dsHostMount.Key)
}
return hosts, nil
}
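
A small, hedged example of the path format CreateDirectory expects: govmomi's object.DatastorePath renders the "[datastore] folder" form described above. The datastore name, folder name, and helper function are illustrative only.

package example

import (
    "context"

    "github.com/vmware/govmomi/object"

    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// ensureKubevolsDir builds the "[<datastore>] kubevols" style path that
// Datastore.CreateDirectory expects and treats an already existing directory
// as success. The folder name "kubevols" is only an example.
func ensureKubevolsDir(ctx context.Context, ds *vclib.Datastore, datastoreName string) error {
    dir := object.DatastorePath{Datastore: datastoreName, Path: "kubevols"}
    err := ds.CreateDirectory(ctx, dir.String(), false)
    if err != nil && err != vclib.ErrFileAlreadyExist {
        return err
    }
    return nil
}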

View File

@@ -1,91 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"testing"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/simulator"
)
func TestDatastore(t *testing.T) {
ctx := context.Background()
// vCenter model + initial set of objects (cluster, hosts, VMs, network, datastore, etc)
model := simulator.VPX()
defer model.Remove()
err := model.Create()
if err != nil {
t.Fatal(err)
}
s := model.Service.NewServer()
defer s.Close()
c, err := govmomi.NewClient(ctx, s.URL, true)
if err != nil {
t.Fatal(err)
}
vc := &VSphereConnection{Client: c.Client}
dc, err := GetDatacenter(ctx, vc, TestDefaultDatacenter)
if err != nil {
t.Error(err)
}
all, err := dc.GetAllDatastores(ctx)
if err != nil {
t.Fatal(err)
}
for _, info := range all {
ds := info.Datastore
kind, cerr := ds.GetType(ctx)
if cerr != nil {
t.Error(cerr)
}
if kind == "" {
t.Error("empty Datastore type")
}
dir := object.DatastorePath{
Datastore: info.Info.Name,
Path: "kubevols",
}
// TODO: test Datastore.IsCompatibleWithStoragePolicy (vcsim needs PBM support)
for _, fail := range []bool{false, true} {
cerr = ds.CreateDirectory(ctx, dir.String(), false)
if fail {
if cerr != ErrFileAlreadyExist {
t.Errorf("expected %s, got: %s", ErrFileAlreadyExist, cerr)
}
continue
}
if cerr != nil {
t.Error(cerr)
}
}
}
}

View File

@@ -1,35 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"vdm.go",
"virtualdisk.go",
"vmdm.go",
],
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers",
deps = [
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -1,91 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diskmanagers
import (
"context"
"time"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
// virtualDiskManager implements VirtualDiskProvider Interface for creating and deleting volume using VirtualDiskManager
type virtualDiskManager struct {
diskPath string
volumeOptions *vclib.VolumeOptions
}
// Create implements Disk's Create interface
// Contains implementation of virtualDiskManager based Provisioning
func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (canonicalDiskPath string, err error) {
if diskManager.volumeOptions.SCSIControllerType == "" {
diskManager.volumeOptions.SCSIControllerType = vclib.LSILogicControllerType
}
// Create virtual disk
diskFormat := vclib.DiskFormatValidType[diskManager.volumeOptions.DiskFormat]
// Create a virtual disk manager
vdm := object.NewVirtualDiskManager(datastore.Client())
// Create specification for new virtual disk
vmDiskSpec := &types.FileBackedVirtualDiskSpec{
VirtualDiskSpec: types.VirtualDiskSpec{
AdapterType: diskManager.volumeOptions.SCSIControllerType,
DiskType: diskFormat,
},
CapacityKb: int64(diskManager.volumeOptions.CapacityKB),
}
requestTime := time.Now()
// Create virtual disk
task, err := vdm.CreateVirtualDisk(ctx, diskManager.diskPath, datastore.Datacenter.Datacenter, vmDiskSpec)
if err != nil {
vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err)
klog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err)
return "", err
}
taskInfo, err := task.WaitForResult(ctx, nil)
vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err)
if err != nil {
klog.Errorf("Failed to complete virtual disk creation: %s. err: %+v", diskManager.diskPath, err)
return "", err
}
canonicalDiskPath = taskInfo.Result.(string)
return canonicalDiskPath, nil
}
// Delete implements Disk's Delete interface
func (diskManager virtualDiskManager) Delete(ctx context.Context, datacenter *vclib.Datacenter) error {
// Create a virtual disk manager
virtualDiskManager := object.NewVirtualDiskManager(datacenter.Client())
diskPath := vclib.RemoveStorageClusterORFolderNameFromVDiskPath(diskManager.diskPath)
requestTime := time.Now()
// Delete virtual disk
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter.Datacenter)
if err != nil {
klog.Errorf("Failed to delete virtual disk. err: %v", err)
vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err)
return err
}
err = task.Wait(ctx)
vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err)
if err != nil {
klog.Errorf("Failed to delete virtual disk. err: %v", err)
return err
}
return nil
}

View File

@@ -1,80 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diskmanagers
import (
"context"
"fmt"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
// VirtualDisk is for the Disk Management
type VirtualDisk struct {
DiskPath string
VolumeOptions *vclib.VolumeOptions
VMOptions *vclib.VMOptions
}
// VirtualDisk Operations Const
const (
VirtualDiskCreateOperation = "Create"
VirtualDiskDeleteOperation = "Delete"
)
// VirtualDiskProvider defines the interface for creating and deleting virtual disks
type VirtualDiskProvider interface {
Create(ctx context.Context, datastore *vclib.Datastore) (string, error)
Delete(ctx context.Context, datacenter *vclib.Datacenter) error
}
// getDiskManager returns vmDiskManager or virtualDiskManager based on the given volume options
func getDiskManager(disk *VirtualDisk, diskOperation string) VirtualDiskProvider {
var diskProvider VirtualDiskProvider
switch diskOperation {
case VirtualDiskDeleteOperation:
diskProvider = virtualDiskManager{disk.DiskPath, disk.VolumeOptions}
case VirtualDiskCreateOperation:
if disk.VolumeOptions.StoragePolicyName != "" || disk.VolumeOptions.VSANStorageProfileData != "" || disk.VolumeOptions.StoragePolicyID != "" {
diskProvider = vmDiskManager{disk.DiskPath, disk.VolumeOptions, disk.VMOptions}
} else {
diskProvider = virtualDiskManager{disk.DiskPath, disk.VolumeOptions}
}
}
return diskProvider
}
// Create gets appropriate disk manager and calls respective create method
func (virtualDisk *VirtualDisk) Create(ctx context.Context, datastore *vclib.Datastore) (string, error) {
if virtualDisk.VolumeOptions.DiskFormat == "" {
virtualDisk.VolumeOptions.DiskFormat = vclib.ThinDiskType
}
if !virtualDisk.VolumeOptions.VerifyVolumeOptions() {
klog.Error("VolumeOptions verification failed. volumeOptions: ", virtualDisk.VolumeOptions)
return "", vclib.ErrInvalidVolumeOptions
}
if virtualDisk.VolumeOptions.StoragePolicyID != "" && virtualDisk.VolumeOptions.StoragePolicyName != "" {
return "", fmt.Errorf("Storage Policy ID and Storage Policy Name both set, Please set only one parameter")
}
return getDiskManager(virtualDisk, VirtualDiskCreateOperation).Create(ctx, datastore)
}
// Delete gets appropriate disk manager and calls respective delete method
func (virtualDisk *VirtualDisk) Delete(ctx context.Context, datacenter *vclib.Datacenter) error {
return getDiskManager(virtualDisk, VirtualDiskDeleteOperation).Delete(ctx, datacenter)
}
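
As a rough sketch of the selection logic in getDiskManager: when none of the storage policy fields are set on VolumeOptions, Create falls through to the VirtualDiskManager-based provisioner rather than the dummy-VM-based one. The field values, helper name, and capacity below are assumptions for illustration, and the import paths are the pre-move ones used in this commit.

package example

import (
    "context"

    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
)

// createThinDisk provisions a plain thin-provisioned VMDK. Because no storage
// policy fields are set in VolumeOptions, getDiskManager selects the
// virtualDiskManager path for the Create call.
func createThinDisk(ctx context.Context, ds *vclib.Datastore, diskPath string) (string, error) {
    disk := diskmanagers.VirtualDisk{
        DiskPath: diskPath,
        VolumeOptions: &vclib.VolumeOptions{
            CapacityKB: 1024 * 1024, // 1 GiB, illustrative
            DiskFormat: vclib.ThinDiskType,
            Name:       "example-vol", // hypothetical volume name
        },
        VMOptions: &vclib.VMOptions{},
    }
    return disk.Create(ctx, ds)
}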

View File

@@ -1,253 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diskmanagers
import (
"context"
"fmt"
"hash/fnv"
"strings"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
// vmDiskManager implements VirtualDiskProvider interface for creating volume using Virtual Machine Reconfigure approach
type vmDiskManager struct {
diskPath string
volumeOptions *vclib.VolumeOptions
vmOptions *vclib.VMOptions
}
// Create implements Disk's Create interface
// Contains implementation of VM based Provisioning to provision disk with SPBM Policy or VSANStorageProfileData
func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (canonicalDiskPath string, err error) {
if vmdisk.volumeOptions.SCSIControllerType == "" {
vmdisk.volumeOptions.SCSIControllerType = vclib.PVSCSIControllerType
}
pbmClient, err := vclib.NewPbmClient(ctx, datastore.Client())
if err != nil {
klog.Errorf("Error occurred while creating new pbmClient, err: %+v", err)
return "", err
}
if vmdisk.volumeOptions.StoragePolicyID == "" && vmdisk.volumeOptions.StoragePolicyName != "" {
vmdisk.volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, vmdisk.volumeOptions.StoragePolicyName)
if err != nil {
klog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err)
return "", err
}
}
if vmdisk.volumeOptions.StoragePolicyID != "" {
compatible, faultMessage, err := datastore.IsCompatibleWithStoragePolicy(ctx, vmdisk.volumeOptions.StoragePolicyID)
if err != nil {
klog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err)
return "", err
}
if !compatible {
klog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName)
return "", fmt.Errorf("User specified datastore is not compatible with the storagePolicy: %q. Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage)
}
}
storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{}
// If the PBM storage policy ID is present, set the storage spec profile ID;
// otherwise, set the raw VSAN policy string.
if vmdisk.volumeOptions.StoragePolicyID != "" {
storageProfileSpec.ProfileId = vmdisk.volumeOptions.StoragePolicyID
} else if vmdisk.volumeOptions.VSANStorageProfileData != "" {
// Check Datastore type - VSANStorageProfileData is only applicable to vSAN Datastore
dsType, err := datastore.GetType(ctx)
if err != nil {
return "", err
}
if dsType != vclib.VSANDatastoreType {
klog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name())
return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+
" The policy parameters will work only with VSAN Datastore."+
" So, please specify a valid VSAN datastore in Storage class definition.", datastore.Name())
}
storageProfileSpec.ProfileId = ""
storageProfileSpec.ProfileData = &types.VirtualMachineProfileRawData{
ExtensionKey: "com.vmware.vim.sps",
ObjectData: vmdisk.volumeOptions.VSANStorageProfileData,
}
} else {
klog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
return "", fmt.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
}
var dummyVM *vclib.VirtualMachine
// Check if VM already exist in the folder.
// If VM is already present, use it, else create a new dummy VM.
fnvHash := fnv.New32a()
fnvHash.Write([]byte(vmdisk.volumeOptions.Name))
dummyVMFullName := vclib.DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
dummyVM, err = datastore.Datacenter.GetVMByPath(ctx, vmdisk.vmOptions.VMFolder.InventoryPath+"/"+dummyVMFullName)
if err != nil {
// Create a dummy VM
klog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName)
dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName)
if err != nil {
klog.Errorf("Failed to create Dummy VM. err: %v", err)
return "", err
}
}
// Reconfigure the VM to attach the disk with the VSAN policy configured
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
disk, _, err := dummyVM.CreateDiskSpec(ctx, vmdisk.diskPath, datastore, vmdisk.volumeOptions)
if err != nil {
klog.Errorf("Failed to create Disk Spec. err: %v", err)
return "", err
}
deviceConfigSpec := &types.VirtualDeviceConfigSpec{
Device: disk,
Operation: types.VirtualDeviceConfigSpecOperationAdd,
FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate,
}
deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, storageProfileSpec)
virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec)
fileAlreadyExist := false
task, err := dummyVM.Reconfigure(ctx, virtualMachineConfigSpec)
if err != nil {
klog.Errorf("Failed to reconfig. err: %v", err)
return "", err
}
err = task.Wait(ctx)
if err != nil {
fileAlreadyExist = isAlreadyExists(vmdisk.diskPath, err)
if fileAlreadyExist {
// Skip the error and continue to detach the disk, as the disk was already created on the datastore.
klog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath)
} else {
klog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err)
return "", err
}
}
// Detach the disk from the dummy VM.
err = dummyVM.DetachDisk(ctx, vmdisk.diskPath)
if err != nil {
if vclib.DiskNotFoundErrMsg == err.Error() && fileAlreadyExist {
// Skip error if disk was already detached from the dummy VM but still present on the datastore.
klog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath)
} else {
klog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err)
return "", err
}
}
// Delete the dummy VM
err = dummyVM.DeleteVM(ctx)
if err != nil {
klog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err)
}
return vmdisk.diskPath, nil
}
func (vmdisk vmDiskManager) Delete(ctx context.Context, datacenter *vclib.Datacenter) error {
return fmt.Errorf("vmDiskManager.Delete is not supported")
}
// createDummyVM creates a dummy VM at the specified location with the given name.
func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib.Datacenter, vmName string) (*vclib.VirtualMachine, error) {
// Create a virtual machine config spec with 1 SCSI adapter.
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{
Name: vmName,
Files: &types.VirtualMachineFileInfo{
VmPathName: "[" + vmdisk.volumeOptions.Datastore + "]",
},
NumCPUs: 1,
MemoryMB: 4,
DeviceChange: []types.BaseVirtualDeviceConfigSpec{
&types.VirtualDeviceConfigSpec{
Operation: types.VirtualDeviceConfigSpecOperationAdd,
Device: &types.ParaVirtualSCSIController{
VirtualSCSIController: types.VirtualSCSIController{
SharedBus: types.VirtualSCSISharingNoSharing,
VirtualController: types.VirtualController{
BusNumber: 0,
VirtualDevice: types.VirtualDevice{
Key: 1000,
},
},
},
},
},
},
}
task, err := vmdisk.vmOptions.VMFolder.CreateVM(ctx, virtualMachineConfigSpec, vmdisk.vmOptions.VMResourcePool, nil)
if err != nil {
klog.Errorf("Failed to create VM. err: %+v", err)
return nil, err
}
dummyVMTaskInfo, err := task.WaitForResult(ctx, nil)
if err != nil {
klog.Errorf("Error occurred while waiting for create VM task result. err: %+v", err)
return nil, err
}
vmRef := dummyVMTaskInfo.Result.(object.Reference)
dummyVM := object.NewVirtualMachine(datacenter.Client(), vmRef.Reference())
return &vclib.VirtualMachine{VirtualMachine: dummyVM, Datacenter: datacenter}, nil
}
// CleanUpDummyVMs deletes stale dummy VMs
func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datacenter) error {
vmList, err := folder.GetVirtualMachines(ctx)
if err != nil {
klog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err)
return err
}
if len(vmList) == 0 {
klog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
return fmt.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
}
var dummyVMList []*vclib.VirtualMachine
// Loop through the VMs in the Kubernetes cluster to find dummy VMs
for _, vm := range vmList {
vmName, err := vm.ObjectName(ctx)
if err != nil {
klog.V(4).Infof("Unable to get name from VM with err: %+v", err)
continue
}
if strings.HasPrefix(vmName, vclib.DummyVMPrefixName) {
vmObj := vclib.VirtualMachine{VirtualMachine: object.NewVirtualMachine(dc.Client(), vm.Reference()), Datacenter: dc}
dummyVMList = append(dummyVMList, &vmObj)
}
}
for _, vm := range dummyVMList {
err = vm.DeleteVM(ctx)
if err != nil {
klog.V(4).Infof("Unable to delete dummy VM with err: %+v", err)
continue
}
}
return nil
}
func isAlreadyExists(path string, err error) bool {
errorMessage := fmt.Sprintf("Cannot complete the operation because the file or folder %s already exists", path)
return err.Error() == errorMessage
}
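
For reference, a small sketch of the dummy VM naming scheme used by Create above: the vclib.DummyVMPrefixName prefix followed by an FNV-1a 32-bit hash of the volume name. The literal prefix value is an assumption here; the real constant lives in vclib, and CleanUpDummyVMs treats any VM with that prefix as stale.

package example

import (
    "fmt"
    "hash/fnv"
)

// dummyVMName mirrors the naming used by vmDiskManager.Create: a fixed prefix
// plus the FNV-1a 32-bit hash of the volume name.
func dummyVMName(volumeName string) string {
    const dummyVMPrefixName = "vsphere-k8s" // assumption; see vclib.DummyVMPrefixName
    h := fnv.New32a()
    h.Write([]byte(volumeName))
    return dummyVMPrefixName + "-" + fmt.Sprint(h.Sum32())
}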

View File

@@ -1,26 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["fixtures.go"],
data = glob([
"*.pem",
"*.key",
]),
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/fixtures",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,51 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJJwIBAAKCAgEA4CKLwCPwMUIVaGhvZxLmXEzDflILVaGCZRRBbfYucfysylT/
JKPMlKs3ORNVW1cdiW1z/ZUlAlN+eqq40WSVQJqLUeXltsfZwemdFmf3SAWIu9v9
wI5mhLQJMh2XPKNILCBhrET/ANLVPbObJUFvGavpR9XVXTXsLUvuCR+oSpDvQYyn
WKJ5dAwqKaFx3GCEFAm0dNnSzliQrzKFOE0DUMxFQH5Lt2EYLHrya+K4ZtYbX5nK
X++T9R5pZs0npqmTQS/rIffv2hT89tKdqPz/MCt5xwmjsAO2uri5O+WaLUIkf8Bd
fmVAusE/5v2p3x3MH0rUXaNPg7FqLj1cnbcwHqqt3PmVl9VZINkPbnHHiua21GNq
DAZ/G/vP8/hlXwIeE8d6YPsSPm4NEH0Ku+yk0TEL6QkGFMYYpyCw1BNYGXd+zvf1
xjZtGrcViHhesxuv71nGdJbNSi7zwkYXydSKCNnjJ+Oqyip5uUC+DmydqcJTQLcZ
W5ObNfeB8PLz6UuVidMffh8evE13L60cS5wZyZWherMqB+I/uREt05gikCtlJVOo
shuLS0QjbK/INYCSFBJjt0xrwTbw13SQsEhktQYdqTHaDBWi6uh+pcY9msF1jZJ+
GAEPYcLzK3o2/kE6g09TZ3QDeP9bEDTllL+mIs4JGiWGNC/eGjGfyyAnfmECAwEA
AQKCAf88aRNBtm4G2MjsWzmrjmyIdCg84+AqNF3w4ITCHphmILRx1HbwaTW63GsF
9zAKbnCHmfipYImZFugAKAOobHPN9dmXOV+w5CzNFyo/38XGo7c26xR50efP3Lad
y1v3/Ap32kJ5LB+PGURgXQh0Ai7vvGYj9n6LoP0HOG/wBZhWgLn78O0p9qDFpoG2
tsz5mQoAXJ1G4W7wLu7QSc2eXyOFo4kG2QOPaZwaYQj2CyWokgzOt6TUNr6qUogW
LTWCtjH6X/AAN9Nt9Do6TIoyAf7F/PHVs8NqrZWSvjcu7bOgfzNXO4H3j1LjAzM2
Dyi5+k4KISEcG+hSln8H94H/AGD3Yea44sDnIZoOtKTB+O7V+jyU7qwtX9QaEu04
CslnZOun0/PR/C9mI7QaGu1YJcxdIw9Nlj07+iAzI4ZjuO+qHeUM7SNvH/MVbglA
2ZDkp7J3VlJgFObvHdINZMWNO1FIg/pc7TcXNsUkNAwnCwLh6//5/cZF+BtGlc4A
SGkhYKX3dRp8qLjNKxux3VHROoDByJDEUcnn0fEAm9aMbV+PofpghJtQqnKbsMn8
iF29v+9+JBIHFxAwhCIv9atF82VHt/sGPcsRqohttRWJDaUMBM3N8rvciiigcYzh
c/o4kH0YNoFSs4+evhYQDxk8yIGsgyuGfnW5QaLUIPa2AxblAoIBAQDyfoJr3UFq
LfkTkYHjAo4eua1vzlM3/8aFFnuQhMeoFvw4aA26x1DsUUozIRXTWWbnFH6GY8T3
B46jgWcO4UaMqbxQxTpHSDQFSVn/budugxGU70WQ9LcjSobk9uCXgk2MmRn9tA/6
+ergswSEuPxyNzgDF60BTrS5V2Akh6eF/sYZWnMKazZ3kdw1V5Y/IxfNH1yo6GRz
PTPVyyX6kU3+DNQSplgcsKYFhyoT2HPIRaxR1fTIw9E5w1rQWanYz/A0I3SDECsc
qJDy1rzC+0Tye2XLcWzHu5l1ng8GPLQJfjEtMTKXMIHjpLFC1P4hXNrlxTOnALSS
95bwzvDqfxULAoIBAQDsnkUVOvoXrE9xRI2EyWi1K08i5YSwy3rtV+uJP2Zyy4uB
r3TfzxFnYdXWykzHJGqHd6N5M6vCmbcLMS0G9z5QpDhrIF5vk26P9isvZ3k7rkWG
jgwif3kBcPQXlCDgwwnVmGsBf/A+2z3HOfNPK3Iy3VamFvYD52wgL8+N0puZ42aU
aH759JjLGcaVZWzWNdIcpS1OsBucGXCj3IeHmLjhJFbVebIHJ4rCs7gj51H8R8uk
fksxsgfPdRRpYq7NkDOzVDPb/KtTf5C4ZDogRaxj765DMnn6LhBFQVuDWEDJgjlF
Aolt8ynskf3xd19nlX99QAzXnql6LLClwps6G8XDAoIBADzhslDufevwmuZk091w
2MmyCG9Xt+EJYIgtetxv2cjD7JMk3L2WKSULy7tGhTpI6eL+bD3FcsAqr48xf/Rm
btYGD3ef7N/Uqurg3a2Z5JUEZzejUy3vosNDhNabfQvM9TdlgPcHbDOw511+1JWV
9Bug7XkpSpBXeFxIKaVCQbcMniPjZ5qoDEa84jKqSNiVMPaY9ySZJA8iwI7esCxW
quQryFreVKTvXN9qbhAJehhAFeF9/DUjpLYB7Bz/RftfSYltlWUKfCh30dyGOWIi
v865WHdZhNwop4C2LEN+nhz8B9C212LKFPJYeQC0hRFPRM4HUs6NCMkVTFotOqNF
QL0CggEAGXBysPOkS8NEz0K1jF8zGLdNTM0sVO2ri7T2J81fMFxd5VV91Uon7tt/
6BXb51Us9t+P/cnmX4ezPErPMn6GfpkJT8stHAXXzzaCMhiH2jjEVNEU0Oivk84X
ECnm1wNhHUvDxWeB5uAfZjn+xLZBEuLlG/o//O92modJY1APVp4yOyZ48FqxyrQ8
u3cqGmWy701674jTjxbVG2jsUVHEHsCPbWgmEcrYilJUK9gE4oC9jjPd1bv0RwOp
bCMl9Afa5x7YbIBf0xxV7N0puqqC/EOakrLslk85hJigRCDK5l9P1PGO4PlRupN/
n+Rbp4FVMZwfRVdTlUUUwN2JXtf5jQKCAQEAqSMv1mkLS3qnmW1E/qAYrEmMlHZo
253wuwsO0XS7xCxcEumIvjYCvhnHPYIO2rqsscmk42gYe/OUfteMb71BJ+HnlyOo
9oDbZg8W2DSUzTUy0yT/JMcNTwVCPeVj+bZ/LzDP5jKmZ7vXZkLGQCgU6ENVmsCg
b8nKz0xc7o8jERaSGY+h3LthXF0wAZJ3NdbnJjFbL8hYpwTrD6xd/yg3M5grrCLe
iBKfdpCIN6VrqI9VymoPZryb1OVEiClt0LHWTIXQPcH2J/CrMeWoGhRBW3yTAECf
HPhYMZddW2y6uOFjRcUCu2HG35ogEYlDd0kjH1HhPC2xXcFQBmOyPpEeDQ==
-----END RSA PRIVATE KEY-----

View File

@@ -1,29 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIE/jCCAuYCCQDRJ2qPhdmG0DANBgkqhkiG9w0BAQsFADBAMQswCQYDVQQGEwJV
UzELMAkGA1UECAwCQ0ExEzARBgNVBAoMCkFjbWUsIEluYy4xDzANBgNVBAMMBnNv
bWVDQTAgFw0xODA2MDgxMzM5MjFaGA8yMjE4MDQyMTEzMzkyMVowQDELMAkGA1UE
BhMCVVMxCzAJBgNVBAgMAkNBMRMwEQYDVQQKDApBY21lLCBJbmMuMQ8wDQYDVQQD
DAZzb21lQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDgIovAI/Ax
QhVoaG9nEuZcTMN+UgtVoYJlFEFt9i5x/KzKVP8ko8yUqzc5E1VbVx2JbXP9lSUC
U356qrjRZJVAmotR5eW2x9nB6Z0WZ/dIBYi72/3AjmaEtAkyHZc8o0gsIGGsRP8A
0tU9s5slQW8Zq+lH1dVdNewtS+4JH6hKkO9BjKdYonl0DCopoXHcYIQUCbR02dLO
WJCvMoU4TQNQzEVAfku3YRgsevJr4rhm1htfmcpf75P1HmlmzSemqZNBL+sh9+/a
FPz20p2o/P8wK3nHCaOwA7a6uLk75ZotQiR/wF1+ZUC6wT/m/anfHcwfStRdo0+D
sWouPVydtzAeqq3c+ZWX1Vkg2Q9ucceK5rbUY2oMBn8b+8/z+GVfAh4Tx3pg+xI+
bg0QfQq77KTRMQvpCQYUxhinILDUE1gZd37O9/XGNm0atxWIeF6zG6/vWcZ0ls1K
LvPCRhfJ1IoI2eMn46rKKnm5QL4ObJ2pwlNAtxlbk5s194Hw8vPpS5WJ0x9+Hx68
TXcvrRxLnBnJlaF6syoH4j+5ES3TmCKQK2UlU6iyG4tLRCNsr8g1gJIUEmO3TGvB
NvDXdJCwSGS1Bh2pMdoMFaLq6H6lxj2awXWNkn4YAQ9hwvMrejb+QTqDT1NndAN4
/1sQNOWUv6YizgkaJYY0L94aMZ/LICd+YQIDAQABMA0GCSqGSIb3DQEBCwUAA4IC
AQBYBRH/q3gB4gEiOAUl9HbnoUb7MznZ0uQTH7fUYqr66ceZkg9w1McbwiAeZAaY
qQWwr3u4A8/Bg8csE2yQTsXeA33FP3Q6obyuYn4q7e++4+9SLkbSSQfbB67pGUK5
/pal6ULrLGzs69fbL1tOaA/VKQJndg3N9cftyiIUWTzHDop8SLmIobWVRtPQHf00
oKq8loakyluQdxQxnGdl7vMXwSpSpIH84TOdy2JN90MzVLgOz55sb/wRYfhClNFD
+1sb2V4nL2w1kXaO2UVPzk7qpG5FE54JPvvN67Ec4JjMSnGo8l3dJ9jGEmgBIML3
l1onrti2HStSs1vR4Ax0xok08okRlrGA4FqQiSx853T5uLa/JLmWfLKg9ixR4ZV+
dF+2ZrFwDLZUr4VeaDd2v2mQFBNLvdZrqp1OZ4B/1+H5S8ucb+oVhGqzDkEvRCc+
WYpNxx7kpwZPTLmMYTXXKdTWfpgz9GL0LSkY8d1rxLwHxtV8EzAkV+zIWix4h/IE
0FG4WvhrttMCu8ulZhGGoVqy7gdb4+ViWnUYNuCCjIcRJj7SeZaDawBASa/jZwik
Hxrwn0osGUqEUBmvjDdXJpTaKCr2GFOvhCM2pG6AXa14b5hS2DgbX+NZYcScYtVC
vn2HMDjnIEF4uOfDJU5eLok4jli5+VwzOQ7hOHs3DIm4+g==
-----END CERTIFICATE-----

View File

@@ -1,93 +0,0 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
readonly VALID_DAYS='73000'
readonly RSA_KEY_SIZE='4096'
createKey() {
openssl genrsa \
-out "$1" \
"$RSA_KEY_SIZE"
}
createCaCert() {
openssl req \
-x509 \
-subj "$( getSubj 'someCA' )" \
-new \
-nodes \
-key "$2" \
-sha256 \
-days "$VALID_DAYS" \
-out "$1"
}
createCSR() {
openssl req \
-new \
-sha256 \
-key "$2" \
-subj "$( getSubj 'localhost' )" \
-reqexts SAN \
-config <( getSANConfig ) \
-out "$1"
}
signCSR() {
openssl x509 \
-req \
-in "$2" \
-CA "$3" \
-CAkey "$4" \
-CAcreateserial \
-days "$VALID_DAYS" \
-sha256 \
-extfile <( getSAN ) \
-out "$1"
}
getSubj() {
local cn="${1:-someRandomCN}"
echo "/C=US/ST=CA/O=Acme, Inc./CN=${cn}"
}
getSAN() {
printf "subjectAltName=DNS:localhost,IP:127.0.0.1"
}
getSANConfig() {
cat /etc/ssl/openssl.cnf
printf '\n[SAN]\n'
getSAN
}
main() {
local caCertPath="./ca.pem"
local caKeyPath="./ca.key"
local serverCsrPath="./server.csr"
local serverCertPath="./server.pem"
local serverKeyPath="./server.key"
createKey "$caKeyPath"
createCaCert "$caCertPath" "$caKeyPath"
createKey "$serverKeyPath"
createCSR "$serverCsrPath" "$serverKeyPath"
signCSR "$serverCertPath" "$serverCsrPath" "$caCertPath" "$caKeyPath"
}
main "$@"

View File

@@ -1,65 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fixtures
import (
"os"
"path/filepath"
"runtime"
"strings"
)
var (
// CaCertPath is the filepath to a certificate that can be used as a CA
// certificate.
CaCertPath string
// ServerCertPath is the filepath to a leaf certificate signed by the CA at
// `CaCertPath`.
ServerCertPath string
// ServerKeyPath is the filepath to the private key for the certificate at
// `ServerCertPath`.
ServerKeyPath string
// InvalidCertPath is the filepath to an invalid certificate.
InvalidCertPath string
)
func init() {
_, thisFile, _, ok := runtime.Caller(0)
if !ok {
panic("Cannot get path to the fixtures")
}
fixturesDir := filepath.Dir(thisFile)
cwd, err := os.Getwd()
if err != nil {
panic("Cannot get CWD: " + err.Error())
}
// When tests run in a bazel sandbox `runtime.Caller()`
// returns a relative path, when run with plain `go test` the path
// returned is absolute. To make those fixtures work in both those cases,
// we prepend the CWD iff the CWD is not yet part of the path to the fixtures.
if !strings.HasPrefix(fixturesDir, cwd) {
fixturesDir = filepath.Join(cwd, fixturesDir)
}
CaCertPath = filepath.Join(fixturesDir, "ca.pem")
ServerCertPath = filepath.Join(fixturesDir, "server.pem")
ServerKeyPath = filepath.Join(fixturesDir, "server.key")
InvalidCertPath = filepath.Join(fixturesDir, "invalid.pem")
}
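
A hedged sketch of how a test might consume these fixture paths to assemble a TLS configuration; the helper and its usage are illustrative and not part of this package, and the fixtures import path is the pre-move one from the BUILD file above.

package example

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"

    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/fixtures"
)

// fixtureTLSConfig loads the CA and server key pair exposed by the fixtures
// package into a *tls.Config, as a test helper might do.
func fixtureTLSConfig() (*tls.Config, error) {
    caPEM, err := ioutil.ReadFile(fixtures.CaCertPath)
    if err != nil {
        return nil, err
    }
    pool := x509.NewCertPool()
    if !pool.AppendCertsFromPEM(caPEM) {
        return nil, fmt.Errorf("could not parse CA certificate at %s", fixtures.CaCertPath)
    }
    cert, err := tls.LoadX509KeyPair(fixtures.ServerCertPath, fixtures.ServerKeyPath)
    if err != nil {
        return nil, err
    }
    return &tls.Config{RootCAs: pool, Certificates: []tls.Certificate{cert}}, nil
}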

View File

@@ -1 +0,0 @@
this is some invalid content

View File

@@ -1,28 +0,0 @@
-----BEGIN CERTIFICATE REQUEST-----
MIIEtTCCAp0CAQAwQzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRMwEQYDVQQK
DApBY21lLCBJbmMuMRIwEAYDVQQDDAlsb2NhbGhvc3QwggIiMA0GCSqGSIb3DQEB
AQUAA4ICDwAwggIKAoICAQCVkk5HMKNvMXVJoJcUfKK252UT6rdnlsaFLZOlcbp3
otqiq3A2jhQLeL5Ocyd22s/ak2RX9liK+ynV8fP3YWoUBP5elhwbykubiIvSTRS5
85Z0s9NfzscImMpnivt+bOy3KOoriy/0jfJ7WMqLRUTUEusXUpW8QT/U9cK6DrwQ
E/9oXTr669yvqjyFsxjOB0pLOFFib0LeQZxrA2h+oAP8qT/Of6kyTgGWjLhSC1cV
eCPZsSeZUT61FbIu/b5M42WYuddoFbf8y9m0oLeYizYob7poE25jw91bNa8y2nfS
v+JuCcfO4wq29cnldGFNpJPhBhc1sbBvVshXXKWdfzN1c8RCS5hNANy1phAJ7RFe
3Uj0WneBVBHHJMz7Qh61uxTST1W8HBDTuaBTxGKTcPFWd9u4lj/BEScRFOSC/qiO
1HCKzOsYhjnHfql5GzfQKpEy/e4m2oL8VTqcJBsfHCyxDIH+6Y3ovttymxAUPJ14
r3mG9FDLq1va/+8xzDswyjmRIVQeOgvllzgM5vCKqz6nsXtLRYgkwHMk5yOaAIzO
BnsmZztsyaubjcYvM5pUsiO49VWk6ntiAn+WpF/sreFlesx1peQKbTVovwvn137d
V92Oncce+ZikKHxtz4qOz+dH1Fz7Ykor8fXcsfdbkKvwWdz8U/pOBu+83CxBXTWA
bwIDAQABoC0wKwYJKoZIhvcNAQkOMR4wHDAaBgNVHREEEzARgglsb2NhbGhvc3SH
BH8AAAEwDQYJKoZIhvcNAQELBQADggIBADgJfI3xRKlOInZQjg+afz+L477IiFmP
Pf0qwO/EqBkCmbDbmvXpXi/y9Ffh6bMx2naN873nW3k1uVG2W0O4Bl7di9PkmRxY
ktcWY+CaxDT5+Y3LmrqICgrZmELTuV5G8xX2/7bpdEtY4sWpoOeOun+CeGTCeUGx
sGxOWrhydYwrkowupPthYreIIBBPHWl2gEw/m+Y7aJZGtKnDD9eCbF6RxmXRWHDu
0Ly+F3veXbht9LjKPFsgfsogo33Nl8+W1LCActKNY7NMDdGkc+RqaTyxhYEwomui
N1NDOW1qHqSyp2RC13cXokfLL58WGXS6PpNhSln9u4ZG9a+TY+vw1qC//1CyTicY
ylyEn2qfqTSG3W7T/u6ZTL0MpMjFv8VigpffJcFDjq6lVH8LyTniSXdCREy78jAo
8O/2tzJtWrar8bbeN7KCwVcJVaK15a1GWZmo5Ei33U/2Tm+UyRbWL8eISO2Hs3WM
90aFPaHfqKpiPsJrnnOm270lZclgqEtpsyuLsAClqxytCYPw4zTa6WOfDJtmVUrT
1fvMjqwzvs7jbNrgfkwSxXiABwTMQQWeAtuSO+zZH4Ms10qyANoh4FFi/oS3dRKQ
0kdu7AsJqnou9q9HWq1WCTqMcyNE0KPHuo4xhtOlWoGbsugTs7XBml30D7bKJVfG
PazsY1b0/cx7
-----END CERTIFICATE REQUEST-----

View File

@@ -1,51 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEAlZJORzCjbzF1SaCXFHyitudlE+q3Z5bGhS2TpXG6d6Laoqtw
No4UC3i+TnMndtrP2pNkV/ZYivsp1fHz92FqFAT+XpYcG8pLm4iL0k0UufOWdLPT
X87HCJjKZ4r7fmzstyjqK4sv9I3ye1jKi0VE1BLrF1KVvEE/1PXCug68EBP/aF06
+uvcr6o8hbMYzgdKSzhRYm9C3kGcawNofqAD/Kk/zn+pMk4Bloy4UgtXFXgj2bEn
mVE+tRWyLv2+TONlmLnXaBW3/MvZtKC3mIs2KG+6aBNuY8PdWzWvMtp30r/ibgnH
zuMKtvXJ5XRhTaST4QYXNbGwb1bIV1ylnX8zdXPEQkuYTQDctaYQCe0RXt1I9Fp3
gVQRxyTM+0IetbsU0k9VvBwQ07mgU8Rik3DxVnfbuJY/wREnERTkgv6ojtRwiszr
GIY5x36peRs30CqRMv3uJtqC/FU6nCQbHxwssQyB/umN6L7bcpsQFDydeK95hvRQ
y6tb2v/vMcw7MMo5kSFUHjoL5Zc4DObwiqs+p7F7S0WIJMBzJOcjmgCMzgZ7Jmc7
bMmrm43GLzOaVLIjuPVVpOp7YgJ/lqRf7K3hZXrMdaXkCm01aL8L59d+3Vfdjp3H
HvmYpCh8bc+Kjs/nR9Rc+2JKK/H13LH3W5Cr8Fnc/FP6TgbvvNwsQV01gG8CAwEA
AQKCAgBLBQn8DPo8YDsqxcBhRy45vQ/mkHiTHX3O+JAwkD1tmiI9Ku3qfxKwukwB
fyKRK6jLQdg3gljgxJ80Ltol/xc8mVCYUoQgsDOB/FfdEEpQBkw1lqhzSnxr5G7I
xl3kCHAmYgAp/PL9n2C620sj1YdzM1X06bgupy+D+gxEU/WhvtYBG5nklv6moSUg
DjdnxyJNXh7710Bbx97Tke8Ma+f0B1P4l/FeSN/lCgm9JPD11L9uhbuN28EvBIXN
qfmUCQ5BLx1KmHIi+n/kaCQN/+0XFQsS/oQEyA2znNaWFBu7egDxHji4nQoXwGoW
i2vujJibafmkNc5/2bA8mTx8JXvCLhU2L9j2ZumpKOda0g+pfMauesL+9rvZdqwW
gjdjndOHZlg3qm40hGCDBVmmV3mdnvXrk1BbuB4Y0N7qGo3PyYtJHGwJILaNQVGR
Sj75uTatxJwFXsqSaJaErV3Q90IiyXX4AOFGnWHOs29GEwtnDbCvT/rzqutTYSXD
Yv0XFDznzJelhZTH7FbaW3FW3YGEG1ER/0MtKpsAH4i7H9q3KKK8yrzUsgUkGwXt
xtoLckh91xilPIGbzARdELTEdHrjlFL+qaz3PIqEQScWz3WBu2JcIzGbp6PQfMZ+
FZXarEb/ADZuX0+WoKFYR5jzwMoQfF/fxe2Ib/37ETNw4BgfSQKCAQEAxOw64XgO
nUVJslzGK/H5fqTVpD1rfRmvVAiSDLAuWpClbpDZXqEPuoPPYsiccuUWu9VkJE1F
6MZEexGx1jFkN08QUHD1Bobzu6ThaBc2PrWHRjFGKM60d0AkhOiL4N04FGwVeCN6
xzIJFk1E4VOOo1+lzeAWRvi1lwuWTgQi+m25nwBJtmYdBLGeS+DXy80Fi6deECei
ipDzJ4rxJsZ61uqBeYC4CfuHW9m5rCzJWPMMMFrPdl3OxEyZzKng4Co5EYc5i/QH
piXD6IJayKcTPRK3tBJZp2YCIIdtQLcjAwmDEDowQtelHkbTihXMGRarf3VcOEoN
ozMRgcLEEynuKwKCAQEAwnF5ZkkJEL/1MCOZ6PZfSKl35ZMIz/4Umk8hOMAQGhCT
cnxlDUfGSBu4OihdBbIuBSBsYDjgcev8uyiIPDVy0FIkBKRGfgrNCLDh19aHljvE
bUc3akvbft0mro86AvSd/Rpc7sj841bru37RDUm6AJOtIvb6DWUpMOZgMm0WMmSI
kNs/UT+7rqg+AZPP8lumnJIFnRK38xOehQAaS1FHWGP//38py8yo8eXpMsoCWMch
c+kZD2jsAYV+SWjjkZjcrv/52+asd4AotRXIShV8E8xItQeq6vLHKOaIe0tC2Y44
ONAKiu4dgABt1voy8I5J63MwgeNmgAUS+KsgUclYzQKCAQEAlt/3bPAzIkQH5uQ1
4U2PvnxEQ4XbaQnYzyWR4K7LlQ/l8ASCxoHYLyr2JdVWKKFk/ZzNERMzUNk3dqNk
AZvuEII/GaKx2MJk04vMN5gxM3KZpinyeymEEynN0RbqtOpJITx+ZoGofB3V4IRr
FciTLJEH0+iwqMe9OXDjQ/rfYcfXw/7QezNZYFNF2RT3wWnfqdQduXrkig3sfotx
oCfJzgf2E0WPu/Y/CxyRqVzXF5N/7zxkX2gYF0YpQCmX5afz+X4FlTju81lT9DyL
mdiIYO6KWSkGD7+UOaAJEOA/rwAGrtQmTdAy7jONt+pjaYV4+DrO4UG7mSJzc1vq
JlSl6QKCAQARqwPv8mT7e6XI2QNMMs7XqGZ3mtOrKpguqVAIexM7exQazAjWmxX+
SV6FElPZh6Y82wRd/e0PDPVrADTY27ZyDXSuY0rwewTEbGYpGZo6YXXoxBbZ9sic
D3ZLWEJaMGYGsJWPMP4hni1PXSebwH5BPSn3Sl/QRcfnZJeLHXRt4cqy9uka9eKU
7T6tIAQ+LmvGQFJ4QlIqqTa3ORoqi9kiw/tn+OMQXKlhSZXWApsR/A4jHSQkzVDc
loeyHfDHsw8ia6oFfEFhnmiUg8UuTiN3HRHiOS8jqCnGoqP2KBGL+StMpkK++wH9
NozEgvmL+DHpTg8zTjlrGortw4btR5FlAoIBABVni+EsGA5K/PM1gIct2pDm+6Kq
UCYScTwIjftuwKLk/KqermG9QJLiJouKO3ZSz7iCelu87Dx1cKeXrc2LQ1pnQzCB
JnI6BCT+zRnQFXjLokJXD2hIS2hXhqV6/9FRXLKKMYePcDxWt/etLNGmpLnhDfb3
sMOH/9pnaGmtk36Ce03Hh7E1C6io/MKfTq+KKUV1UGwO1BdNQCiclkYzAUqn1O+Y
c8BaeGKc2c6as8DKrPTGGQGmzo/ZUxQVfVFl2g7+HXISWBBcui/G5gtnU1afZqbW
mTmDoqs4510vhlkhN9XZ0DyhewDIqNNGEY2vS1x2fJz1XC2Eve4KpSyUsiE=
-----END RSA PRIVATE KEY-----

View File

@@ -1,30 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFJjCCAw6gAwIBAgIJAOcEAbv8NslfMA0GCSqGSIb3DQEBCwUAMEAxCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJDQTETMBEGA1UECgwKQWNtZSwgSW5jLjEPMA0GA1UE
AwwGc29tZUNBMCAXDTE4MDYwODEzMzkyNFoYDzIyMTgwNDIxMTMzOTI0WjBDMQsw
CQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNVBAoMCkFjbWUsIEluYy4xEjAQ
BgNVBAMMCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
AJWSTkcwo28xdUmglxR8orbnZRPqt2eWxoUtk6Vxunei2qKrcDaOFAt4vk5zJ3ba
z9qTZFf2WIr7KdXx8/dhahQE/l6WHBvKS5uIi9JNFLnzlnSz01/OxwiYymeK+35s
7Lco6iuLL/SN8ntYyotFRNQS6xdSlbxBP9T1wroOvBAT/2hdOvrr3K+qPIWzGM4H
Sks4UWJvQt5BnGsDaH6gA/ypP85/qTJOAZaMuFILVxV4I9mxJ5lRPrUVsi79vkzj
ZZi512gVt/zL2bSgt5iLNihvumgTbmPD3Vs1rzLad9K/4m4Jx87jCrb1yeV0YU2k
k+EGFzWxsG9WyFdcpZ1/M3VzxEJLmE0A3LWmEAntEV7dSPRad4FUEcckzPtCHrW7
FNJPVbwcENO5oFPEYpNw8VZ327iWP8ERJxEU5IL+qI7UcIrM6xiGOcd+qXkbN9Aq
kTL97ibagvxVOpwkGx8cLLEMgf7pjei+23KbEBQ8nXiveYb0UMurW9r/7zHMOzDK
OZEhVB46C+WXOAzm8IqrPqexe0tFiCTAcyTnI5oAjM4GeyZnO2zJq5uNxi8zmlSy
I7j1VaTqe2ICf5akX+yt4WV6zHWl5AptNWi/C+fXft1X3Y6dxx75mKQofG3Pio7P
50fUXPtiSivx9dyx91uQq/BZ3PxT+k4G77zcLEFdNYBvAgMBAAGjHjAcMBoGA1Ud
EQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAgEABL8kffi7
48qSD+/l/UwCYdmqta1vAbOkvLnPtfXe1XlDpJipNuPxUBc8nNTemtrbg0erNJnC
jQHodqmdKBJJOdaEKTwAGp5pYvvjlU3WasmhfJy+QwOWgeqjJcTUo3+DEaHRls16
AZXlsp3hB6z0gzR/qzUuZwpMbL477JpuZtAcwLYeVvLG8bQRyWyEy8JgGDoYSn8s
Z16s+r6AX+cnL/2GHkZ+oc3iuXJbnac4xfWTKDiYnyzK6RWRnoyro7X0jiPz6XX3
wyoWzB1uMSCXscrW6ZcKyKqz75lySLuwGxOMhX4nGOoYHY0ZtrYn5WK2ZAJxsQnn
8QcjPB0nq37U7ifk1uebmuXe99iqyKnWaLvlcpe+HnO5pVxFkSQEf7Zh+hEnRDkN
IBzLFnqwDS1ug/oQ1aSvc8oBh2ylKDJuGtPNqGKibNJyb2diXO/aEUOKRUKPAxKa
dbKsc4Y1bhZNN3/MICMoyghwAOiuwUQMR5uhxTkQmZUwNrPFa+eW6GvyoYLFUsZs
hZfWLNGD5mLADElxs0HF7F9Zk6pSocTDXba4d4lfxsq88SyZZ7PbjJYFRfLQPzd1
CfvpRPqolEmZo1Y5Q644PELYiJRKpBxmX5GtC5j5eaUD9XdGKvXsGhb0m0gW75rq
iUnnLkZt2ya1cDJDiCnJjo7r5KxMo0XXFDc=
-----END CERTIFICATE-----

View File

@@ -1,47 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"github.com/vmware/govmomi/object"
"k8s.io/klog"
)
// Folder extends the govmomi Folder object
type Folder struct {
*object.Folder
Datacenter *Datacenter
}
// GetVirtualMachines returns list of VirtualMachine inside a folder.
func (folder *Folder) GetVirtualMachines(ctx context.Context) ([]*VirtualMachine, error) {
vmFolders, err := folder.Children(ctx)
if err != nil {
klog.Errorf("Failed to get children from Folder: %s. err: %+v", folder.InventoryPath, err)
return nil, err
}
var vmObjList []*VirtualMachine
for _, vmFolder := range vmFolders {
if vmFolder.Reference().Type == VirtualMachineType {
vmObj := VirtualMachine{object.NewVirtualMachine(folder.Client(), vmFolder.Reference()), folder.Datacenter}
vmObjList = append(vmObjList, &vmObj)
}
}
return vmObjList, nil
}
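
A brief, illustrative sketch of walking a folder's VMs with GetVirtualMachines and resolving their names; the helper name is hypothetical and the import path is the pre-move one.

package example

import (
    "context"

    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// vmNamesInFolder resolves the names of all VMs directly under the given folder.
func vmNamesInFolder(ctx context.Context, folder *vclib.Folder) ([]string, error) {
    vms, err := folder.GetVirtualMachines(ctx)
    if err != nil {
        return nil, err
    }
    var names []string
    for _, vm := range vms {
        name, err := vm.ObjectName(ctx)
        if err != nil {
            return nil, err
        }
        names = append(names, name)
    }
    return names, nil
}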

View File

@@ -1,83 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"path"
"testing"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/simulator"
)
func TestFolder(t *testing.T) {
ctx := context.Background()
model := simulator.VPX()
// Child folder "F0" will be created under the root folder and datacenter folders,
// and all resources are created within the "F0" child folders.
model.Folder = 1
defer model.Remove()
err := model.Create()
if err != nil {
t.Fatal(err)
}
s := model.Service.NewServer()
defer s.Close()
c, err := govmomi.NewClient(ctx, s.URL, true)
if err != nil {
t.Fatal(err)
}
vc := &VSphereConnection{Client: c.Client}
dc, err := GetDatacenter(ctx, vc, TestDefaultDatacenter)
if err != nil {
t.Error(err)
}
const folderName = "F0"
vmFolder := path.Join("/", folderName, dc.Name(), "vm")
tests := []struct {
folderPath string
expect int
}{
{vmFolder, 0},
{path.Join(vmFolder, folderName), (model.Host + model.Cluster) * model.Machine},
}
for i, test := range tests {
folder, cerr := dc.GetFolderByPath(ctx, test.folderPath)
if cerr != nil {
t.Fatal(cerr)
}
vms, cerr := folder.GetVirtualMachines(ctx)
if cerr != nil {
t.Fatalf("%d: %s", i, cerr)
}
if len(vms) != test.expect {
t.Errorf("%d: expected %d VMs, got: %d", i, test.expect, len(vms))
}
}
}

View File

@@ -1,169 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"fmt"
"github.com/vmware/govmomi/pbm"
"k8s.io/klog"
pbmtypes "github.com/vmware/govmomi/pbm/types"
"github.com/vmware/govmomi/vim25"
)
// PbmClient extends the govmomi pbm client and provides functions to get the compatible list of datastores for a given policy
type PbmClient struct {
*pbm.Client
}
// NewPbmClient returns a new PBM Client object
func NewPbmClient(ctx context.Context, client *vim25.Client) (*PbmClient, error) {
pbmClient, err := pbm.NewClient(ctx, client)
if err != nil {
klog.Errorf("Failed to create new Pbm Client. err: %+v", err)
return nil, err
}
return &PbmClient{pbmClient}, nil
}
// IsDatastoreCompatible checks if the datastore is compatible with the given storage policy ID.
// If the datastore is not compatible with the policy, a fault message with the datastore name is returned.
func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePolicyID string, datastore *Datastore) (bool, string, error) {
faultMessage := ""
placementHub := pbmtypes.PbmPlacementHub{
HubType: datastore.Reference().Type,
HubId: datastore.Reference().Value,
}
hubs := []pbmtypes.PbmPlacementHub{placementHub}
req := []pbmtypes.BasePbmPlacementRequirement{
&pbmtypes.PbmPlacementCapabilityProfileRequirement{
ProfileId: pbmtypes.PbmProfileId{
UniqueId: storagePolicyID,
},
},
}
compatibilityResult, err := pbmClient.CheckRequirements(ctx, hubs, nil, req)
if err != nil {
klog.Errorf("Error occurred for CheckRequirements call. err %+v", err)
return false, "", err
}
if len(compatibilityResult) > 0 {
compatibleHubs := compatibilityResult.CompatibleDatastores()
if len(compatibleHubs) > 0 {
return true, "", nil
}
dsName, err := datastore.ObjectName(ctx)
if err != nil {
klog.Errorf("Failed to get datastore ObjectName")
return false, "", err
}
if compatibilityResult[0].Error[0].LocalizedMessage == "" {
faultMessage = "Datastore: " + dsName + " is not compatible with the storage policy."
} else {
faultMessage = "Datastore: " + dsName + " is not compatible with the storage policy. LocalizedMessage: " + compatibilityResult[0].Error[0].LocalizedMessage + "\n"
}
return false, faultMessage, nil
}
return false, "", fmt.Errorf("compatibilityResult is nil or empty")
}
// GetCompatibleDatastores filters and returns the compatible list of datastores for the given storage policy ID.
// For non-compatible datastores, a fault message with the datastore name is also returned.
func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, dc *Datacenter, storagePolicyID string, datastores []*DatastoreInfo) ([]*DatastoreInfo, string, error) {
var (
dsMorNameMap = getDsMorNameMap(ctx, datastores)
localizedMessagesForNotCompatibleDatastores = ""
)
compatibilityResult, err := pbmClient.GetPlacementCompatibilityResult(ctx, storagePolicyID, datastores)
if err != nil {
klog.Errorf("Error occurred while retrieving placement compatibility result for datastores: %+v with storagePolicyID: %s. err: %+v", datastores, storagePolicyID, err)
return nil, "", err
}
compatibleHubs := compatibilityResult.CompatibleDatastores()
var compatibleDatastoreList []*DatastoreInfo
for _, hub := range compatibleHubs {
compatibleDatastoreList = append(compatibleDatastoreList, getDatastoreFromPlacementHub(datastores, hub))
}
for _, res := range compatibilityResult {
for _, err := range res.Error {
dsName := dsMorNameMap[res.Hub.HubId]
localizedMessage := ""
if err.LocalizedMessage != "" {
localizedMessage = "Datastore: " + dsName + " not compatible with the storage policy. LocalizedMessage: " + err.LocalizedMessage + "\n"
} else {
localizedMessage = "Datastore: " + dsName + " not compatible with the storage policy. \n"
}
localizedMessagesForNotCompatibleDatastores += localizedMessage
}
}
// Return an error if there are no compatible datastores.
if len(compatibleHubs) < 1 {
klog.Errorf("No compatible datastores found that satisfy the storage policy requirements: %s", storagePolicyID)
return nil, localizedMessagesForNotCompatibleDatastores, fmt.Errorf("No compatible datastores found that satisfy the storage policy requirements")
}
return compatibleDatastoreList, localizedMessagesForNotCompatibleDatastores, nil
}
// GetPlacementCompatibilityResult gets placement compatibility result based on storage policy requirements.
func (pbmClient *PbmClient) GetPlacementCompatibilityResult(ctx context.Context, storagePolicyID string, datastore []*DatastoreInfo) (pbm.PlacementCompatibilityResult, error) {
var hubs []pbmtypes.PbmPlacementHub
for _, ds := range datastore {
hubs = append(hubs, pbmtypes.PbmPlacementHub{
HubType: ds.Reference().Type,
HubId: ds.Reference().Value,
})
}
req := []pbmtypes.BasePbmPlacementRequirement{
&pbmtypes.PbmPlacementCapabilityProfileRequirement{
ProfileId: pbmtypes.PbmProfileId{
UniqueId: storagePolicyID,
},
},
}
res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req)
if err != nil {
klog.Errorf("Error occurred for CheckRequirements call. err: %+v", err)
return nil, err
}
return res, nil
}
// getDatastoreFromPlacementHub returns the matching datastore associated with the given pbmPlacementHub
func getDatastoreFromPlacementHub(datastore []*DatastoreInfo, pbmPlacementHub pbmtypes.PbmPlacementHub) *DatastoreInfo {
for _, ds := range datastore {
if ds.Reference().Type == pbmPlacementHub.HubType && ds.Reference().Value == pbmPlacementHub.HubId {
return ds
}
}
return nil
}
// getDsMorNameMap returns a map of datastore MoRef value to datastore object name
func getDsMorNameMap(ctx context.Context, datastores []*DatastoreInfo) map[string]string {
dsMorNameMap := make(map[string]string)
for _, ds := range datastores {
dsObjectName, err := ds.ObjectName(ctx)
if err == nil {
dsMorNameMap[ds.Reference().Value] = dsObjectName
} else {
klog.Errorf("Error occurred while getting datastore object name. err: %+v", err)
}
}
return dsMorNameMap
}
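// Illustrative sketch (not part of the original file): shows how the helpers in
// this file are typically combined by callers. The pbmClient, dc and datastores
// values are assumed to be supplied by the caller, and the policy ID below is a
// hypothetical placeholder.
func examplePbmCompatibilityCheck(ctx context.Context, pbmClient *PbmClient, dc *Datacenter, datastores []*DatastoreInfo) {
	const storagePolicyID = "hypothetical-storage-policy-id"
	compatible, faultMessages, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, datastores)
	if err != nil {
		klog.Errorf("compatibility check failed: %v, faults: %s", err, faultMessages)
		return
	}
	for _, ds := range compatible {
		klog.V(4).Infof("compatible datastore: %s", ds.Info.GetDatastoreInfo().Name)
	}
}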

View File

@@ -1,210 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"fmt"
"path/filepath"
"regexp"
"strings"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
)
// IsNotFound returns true if err is a NotFoundError or DefaultNotFoundError
func IsNotFound(err error) bool {
_, ok := err.(*find.NotFoundError)
if ok {
return true
}
_, ok = err.(*find.DefaultNotFoundError)
if ok {
return true
}
return false
}
func getFinder(dc *Datacenter) *find.Finder {
finder := find.NewFinder(dc.Client(), false)
finder.SetDatacenter(dc.Datacenter)
return finder
}
// formatVirtualDiskUUID removes any spaces and hyphens in UUID
// Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa
func formatVirtualDiskUUID(uuid string) string {
uuidWithNoSpace := strings.Replace(uuid, " ", "", -1)
uuidWithNoHyphens := strings.Replace(uuidWithNoSpace, "-", "", -1)
return strings.ToLower(uuidWithNoHyphens)
}
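// Illustrative sketch (not part of the original file): expected behavior of
// formatVirtualDiskUUID for a hypothetical SCSI page 83 UUID string.
func exampleFormatVirtualDiskUUID() {
	formatted := formatVirtualDiskUUID("42375390-71f9-43a3-a770-56803BCD7BAA")
	// formatted is "4237539071f943a3a77056803bcd7baa"
	fmt.Println(formatted)
}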
// getSCSIControllersOfType filters the controllers of the given SCSI type from the given list of virtual machine devices
func getSCSIControllersOfType(vmDevices object.VirtualDeviceList, scsiType string) []*types.VirtualController {
// get virtual scsi controllers of passed argument type
var scsiControllers []*types.VirtualController
for _, device := range vmDevices {
devType := vmDevices.Type(device)
if devType == scsiType {
if c, ok := device.(types.BaseVirtualController); ok {
scsiControllers = append(scsiControllers, c.GetVirtualController())
}
}
}
return scsiControllers
}
// getAvailableSCSIController gets an available SCSI controller from the given list, i.e. one that has fewer than 15 disk devices attached.
func getAvailableSCSIController(scsiControllers []*types.VirtualController) *types.VirtualController {
// get SCSI controller which has space for adding more devices
for _, controller := range scsiControllers {
if len(controller.Device) < SCSIControllerDeviceLimit {
return controller
}
}
return nil
}
// getNextUnitNumber gets the next available unit number on the given controller from the given device list
func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) {
var takenUnitNumbers [SCSIDeviceSlots]bool
takenUnitNumbers[SCSIReservedSlot] = true
key := c.GetVirtualController().Key
for _, device := range devices {
d := device.GetVirtualDevice()
if d.ControllerKey == key {
if d.UnitNumber != nil {
takenUnitNumbers[*d.UnitNumber] = true
}
}
}
for unitNumber, takenUnitNumber := range takenUnitNumbers {
if !takenUnitNumber {
return int32(unitNumber), nil
}
}
return -1, fmt.Errorf("SCSI Controller with key=%d does not have any available slots", key)
}
// getSCSIControllers filters and returns the list of SCSI controller devices from the given list of virtual machine devices.
func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController {
// get all virtual scsi controllers
var scsiControllers []*types.VirtualController
for _, device := range vmDevices {
devType := vmDevices.Type(device)
switch devType {
case SCSIControllerType, strings.ToLower(LSILogicControllerType), strings.ToLower(BusLogicControllerType), PVSCSIControllerType, strings.ToLower(LSILogicSASControllerType):
if c, ok := device.(types.BaseVirtualController); ok {
scsiControllers = append(scsiControllers, c.GetVirtualController())
}
}
}
return scsiControllers
}
// RemoveStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath.
// For vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk.
// For vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value remains the same: [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk.
func RemoveStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
if filepath.Base(datastore) != datastore {
vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
}
return vDiskPath
}
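// Illustrative sketch (not part of the original file): demonstrates how the
// datastore-cluster prefix is stripped from a hypothetical disk path.
func exampleRemoveStorageClusterName() {
	withCluster := "[DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"
	fmt.Println(RemoveStorageClusterORFolderNameFromVDiskPath(withCluster))
	// prints: [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
}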
// GetPathFromVMDiskPath retrieves the path from VM Disk Path.
// Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk
func GetPathFromVMDiskPath(vmDiskPath string) string {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
klog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath)
return ""
}
return datastorePathObj.Path
}
// GetDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path.
func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
klog.Errorf("Failed to parse volPath: %s", vmDiskPath)
return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
}
return datastorePathObj, nil
}
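// Illustrative sketch (not part of the original file): parsing a hypothetical
// vmDiskPath into its datastore and relative path components.
func exampleParseVMDiskPath() {
	dsPathObj, err := GetDatastorePathObjFromVMDiskPath("[vsanDatastore] kubevols/volume.vmdk")
	if err != nil {
		klog.Errorf("failed to parse disk path: %v", err)
		return
	}
	// dsPathObj.Datastore is "vsanDatastore", dsPathObj.Path is "kubevols/volume.vmdk"
	fmt.Printf("datastore=%s path=%s\n", dsPathObj.Datastore, dsPathObj.Path)
}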
// IsValidUUID checks if the string is a valid UUID.
func IsValidUUID(uuid string) bool {
r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
return r.MatchString(uuid)
}
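// Illustrative sketch (not part of the original file): IsValidUUID accepts only
// the canonical 8-4-4-4-12 hexadecimal form.
func exampleIsValidUUID() {
	fmt.Println(IsValidUUID("42375390-71f9-43a3-a770-56803bcd7baa")) // true
	fmt.Println(IsValidUUID("4237539071f943a3a77056803bcd7baa"))     // false
}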
// IsManagedObjectNotFoundError returns true if error is of type ManagedObjectNotFound
func IsManagedObjectNotFoundError(err error) bool {
isManagedObjectNotFoundError := false
if soap.IsSoapFault(err) {
_, isManagedObjectNotFoundError = soap.ToSoapFault(err).VimFault().(types.ManagedObjectNotFound)
}
return isManagedObjectNotFoundError
}
// IsInvalidCredentialsError returns true if error is of type InvalidLogin
func IsInvalidCredentialsError(err error) bool {
isInvalidCredentialsError := false
if soap.IsSoapFault(err) {
_, isInvalidCredentialsError = soap.ToSoapFault(err).VimFault().(types.InvalidLogin)
}
return isInvalidCredentialsError
}
// VerifyVolumePathsForVM verifies if the volume paths (volPaths) are attached to VM.
func VerifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) {
// Verify if the volume paths are present on the VM backing virtual disk devices
vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)
VerifyVolumePathsForVMDevices(vmDevices, volPaths, nodeName, nodeVolumeMap)
}
// VerifyVolumePathsForVMDevices verifies if the volume paths (volPaths) are attached to the VM, using the given device list.
func VerifyVolumePathsForVMDevices(vmDevices object.VirtualDeviceList, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) {
volPathsMap := make(map[string]bool)
for _, volPath := range volPaths {
volPathsMap[volPath] = true
}
// Verify if the volume paths are present on the VM backing virtual disk devices
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
if volPathsMap[backing.FileName] {
setNodeVolumeMap(nodeVolumeMap, backing.FileName, nodeName, true)
}
}
}
}
}
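// Illustrative sketch (not part of the original file): how callers typically
// prepare the arguments for VerifyVolumePathsForVMDevices. The vmDevices value
// is assumed to come from vm.Device(ctx); the node name and volume path below
// are hypothetical.
func exampleVerifyVolumePaths(vmDevices object.VirtualDeviceList) {
	nodeVolumeMap := make(map[string]map[string]bool)
	volPaths := []string{"[vsanDatastore] kubevols/volume.vmdk"}
	VerifyVolumePathsForVMDevices(vmDevices, volPaths, "node-1", nodeVolumeMap)
	klog.V(4).Infof("volume attach state per node: %+v", nodeVolumeMap)
}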

View File

@@ -1,71 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"testing"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/simulator"
)
func TestUtils(t *testing.T) {
ctx := context.Background()
model := simulator.VPX()
// Child folder "F0" will be created under the root folder and datacenter folders,
// and all resources are created within the "F0" child folders.
model.Folder = 1
defer model.Remove()
err := model.Create()
if err != nil {
t.Fatal(err)
}
s := model.Service.NewServer()
defer s.Close()
c, err := govmomi.NewClient(ctx, s.URL, true)
if err != nil {
t.Fatal(err)
}
vc := &VSphereConnection{Client: c.Client}
dc, err := GetDatacenter(ctx, vc, TestDefaultDatacenter)
if err != nil {
t.Error(err)
}
finder := getFinder(dc)
datastores, err := finder.DatastoreList(ctx, "*")
if err != nil {
t.Fatal(err)
}
count := model.Count()
if count.Datastore != len(datastores) {
t.Errorf("got %d Datastores, expected: %d", len(datastores), count.Datastore)
}
_, err = finder.Datastore(ctx, testNameNotFound)
if !IsNotFound(err) {
t.Errorf("unexpected error: %s", err)
}
}

View File

@@ -1,425 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"fmt"
"strings"
"time"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
)
// VirtualMachine extends the govmomi VirtualMachine object
type VirtualMachine struct {
*object.VirtualMachine
Datacenter *Datacenter
}
// IsDiskAttached checks if disk is attached to the VM.
func (vm *VirtualMachine) IsDiskAttached(ctx context.Context, diskPath string) (bool, error) {
device, err := vm.getVirtualDeviceByPath(ctx, diskPath)
if err != nil {
return false, err
}
if device != nil {
return true, nil
}
return false, nil
}
// DeleteVM deletes the VM.
func (vm *VirtualMachine) DeleteVM(ctx context.Context) error {
destroyTask, err := vm.Destroy(ctx)
if err != nil {
klog.Errorf("Failed to delete the VM: %q. err: %+v", vm.InventoryPath, err)
return err
}
return destroyTask.Wait(ctx)
}
// AttachDisk attaches the disk at location - vmDiskPath from Datastore - dsObj to the Virtual Machine
// Additionally the disk can be configured with SPBM policy if volumeOptions.StoragePolicyID is non-empty.
func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, volumeOptions *VolumeOptions) (string, error) {
// Check if the diskControllerType is valid
if !CheckControllerSupported(volumeOptions.SCSIControllerType) {
return "", fmt.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions())
}
vmDiskPathCopy := vmDiskPath
vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath)
attached, err := vm.IsDiskAttached(ctx, vmDiskPath)
if err != nil {
klog.Errorf("Error occurred while checking if disk is attached on VM: %q. vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err)
return "", err
}
// If disk is already attached, return the disk UUID
if attached {
diskUUID, _ := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath)
return diskUUID, nil
}
if volumeOptions.StoragePolicyName != "" {
pbmClient, err := NewPbmClient(ctx, vm.Client())
if err != nil {
klog.Errorf("Error occurred while creating new pbmClient. err: %+v", err)
return "", err
}
volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, volumeOptions.StoragePolicyName)
if err != nil {
klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", volumeOptions.StoragePolicyName, err)
return "", err
}
}
dsObj, err := vm.Datacenter.GetDatastoreByPath(ctx, vmDiskPathCopy)
if err != nil {
klog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err)
return "", err
}
// If disk is not attached, create a disk spec for disk to be attached to the VM.
disk, newSCSIController, err := vm.CreateDiskSpec(ctx, vmDiskPath, dsObj, volumeOptions)
if err != nil {
klog.Errorf("Error occurred while creating disk spec. err: %+v", err)
return "", err
}
vmDevices, err := vm.Device(ctx)
if err != nil {
klog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err)
return "", err
}
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
deviceConfigSpec := &types.VirtualDeviceConfigSpec{
Device: disk,
Operation: types.VirtualDeviceConfigSpecOperationAdd,
}
// Configure the disk with the SPBM profile only if ProfileID is not empty.
if volumeOptions.StoragePolicyID != "" {
profileSpec := &types.VirtualMachineDefinedProfileSpec{
ProfileId: volumeOptions.StoragePolicyID,
}
deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, profileSpec)
}
virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec)
requestTime := time.Now()
task, err := vm.Reconfigure(ctx, virtualMachineConfigSpec)
if err != nil {
RecordvSphereMetric(APIAttachVolume, requestTime, err)
klog.Errorf("Failed to attach the disk with storagePolicy: %q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err)
if newSCSIController != nil {
nestedErr := vm.deleteController(ctx, newSCSIController, vmDevices)
if nestedErr != nil {
return "", fmt.Errorf("failed to delete SCSI Controller after reconfiguration failed with err=%v: %v", err, nestedErr)
}
}
return "", err
}
err = task.Wait(ctx)
RecordvSphereMetric(APIAttachVolume, requestTime, err)
if err != nil {
klog.Errorf("Failed to attach the disk with storagePolicy: %+q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err)
if newSCSIController != nil {
nestedErr := vm.deleteController(ctx, newSCSIController, vmDevices)
if nestedErr != nil {
return "", fmt.Errorf("failed to delete SCSI Controller after waiting for reconfiguration failed with err='%v': %v", err, nestedErr)
}
}
return "", err
}
// Once disk is attached, get the disk UUID.
diskUUID, err := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath)
if err != nil {
klog.Errorf("Error occurred while getting Disk Info from VM: %q. err: %v", vm.InventoryPath, err)
nestedErr := vm.DetachDisk(ctx, vmDiskPath)
if nestedErr != nil {
return "", fmt.Errorf("failed to detach disk after getting VM UUID failed with err='%v': %v", err, nestedErr)
}
if newSCSIController != nil {
nestedErr = vm.deleteController(ctx, newSCSIController, vmDevices)
if nestedErr != nil {
return "", fmt.Errorf("failed to delete SCSI Controller after getting VM UUID failed with err='%v': %v", err, nestedErr)
}
}
return "", err
}
return diskUUID, nil
}
// DetachDisk detaches the disk specified by vmDiskPath
func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) error {
vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath)
device, err := vm.getVirtualDeviceByPath(ctx, vmDiskPath)
if err != nil {
klog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath)
return err
}
if device == nil {
klog.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath)
return fmt.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath)
}
// Detach disk from VM
requestTime := time.Now()
err = vm.RemoveDevice(ctx, true, device)
RecordvSphereMetric(APIDetachVolume, requestTime, err)
if err != nil {
klog.Errorf("Error occurred while removing disk device for VM: %q. err: %v", vm.InventoryPath, err)
return err
}
return nil
}
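// Illustrative sketch (not part of the original file): a typical attach/detach
// round trip against a VM handle. The vm value and disk path are assumed to be
// supplied by the caller and are hypothetical.
func exampleAttachDetach(ctx context.Context, vm *VirtualMachine) error {
	opts := &VolumeOptions{SCSIControllerType: PVSCSIControllerType}
	diskPath := "[vsanDatastore] kubevols/volume.vmdk"
	diskUUID, err := vm.AttachDisk(ctx, diskPath, opts)
	if err != nil {
		return err
	}
	klog.V(4).Infof("attached disk with UUID %s", diskUUID)
	return vm.DetachDisk(ctx, diskPath)
}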
// GetResourcePool gets the resource pool for VM.
func (vm *VirtualMachine) GetResourcePool(ctx context.Context) (*object.ResourcePool, error) {
vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"resourcePool"})
if err != nil {
klog.Errorf("Failed to get resource pool from VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
return object.NewResourcePool(vm.Client(), vmMoList[0].ResourcePool.Reference()), nil
}
// IsActive checks if the VM is active.
// Returns true if VM is in poweredOn state.
func (vm *VirtualMachine) IsActive(ctx context.Context) (bool, error) {
vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary"})
if err != nil {
klog.Errorf("Failed to get VM Managed object with property summary. err: +%v", err)
return false, err
}
if vmMoList[0].Summary.Runtime.PowerState == ActivePowerState {
return true, nil
}
return false, nil
}
// GetAllAccessibleDatastores gets the list of accessible Datastores for the given Virtual Machine
func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*DatastoreInfo, error) {
host, err := vm.HostSystem(ctx)
if err != nil {
klog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
var hostSystemMo mo.HostSystem
s := object.NewSearchIndex(vm.Client())
err = s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo)
if err != nil {
klog.Errorf("Failed to retrieve datastores for host: %+v. err: %+v", host, err)
return nil, err
}
var dsRefList []types.ManagedObjectReference
dsRefList = append(dsRefList, hostSystemMo.Datastore...)
var dsMoList []mo.Datastore
pc := property.DefaultCollector(vm.Client())
properties := []string{DatastoreInfoProperty}
err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList)
if err != nil {
klog.Errorf("Failed to get Datastore managed objects from datastore objects."+
" dsObjList: %+v, properties: %+v, err: %v", dsRefList, properties, err)
return nil, err
}
klog.V(9).Infof("Result dsMoList: %+v", dsMoList)
var dsObjList []*DatastoreInfo
for _, dsMo := range dsMoList {
dsObjList = append(dsObjList,
&DatastoreInfo{
&Datastore{object.NewDatastore(vm.Client(), dsMo.Reference()),
vm.Datacenter},
dsMo.Info.GetDatastoreInfo()})
}
return dsObjList, nil
}
// CreateDiskSpec creates a disk spec for the disk to be attached to the VM
func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, dsObj *Datastore, volumeOptions *VolumeOptions) (*types.VirtualDisk, types.BaseVirtualDevice, error) {
var newSCSIController types.BaseVirtualDevice
vmDevices, err := vm.Device(ctx)
if err != nil {
klog.Errorf("Failed to retrieve VM devices. err: %+v", err)
return nil, nil, err
}
// find SCSI controller of particular type from VM devices
scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType)
scsiController := getAvailableSCSIController(scsiControllersOfRequiredType)
if scsiController == nil {
newSCSIController, err = vm.createAndAttachSCSIController(ctx, volumeOptions.SCSIControllerType)
if err != nil {
klog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.InventoryPath, err)
return nil, nil, err
}
// Get VM device list
vmDevices, err := vm.Device(ctx)
if err != nil {
klog.Errorf("Failed to retrieve VM devices. err: %v", err)
return nil, nil, err
}
// verify scsi controller in virtual machine
scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType)
scsiController = getAvailableSCSIController(scsiControllersOfRequiredType)
if scsiController == nil {
klog.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
// attempt clean up of scsi controller
if err := vm.deleteController(ctx, newSCSIController, vmDevices); err != nil {
return nil, nil, fmt.Errorf("failed to delete SCSI controller after failing to find it on VM: %v", err)
}
return nil, nil, fmt.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
}
}
disk := vmDevices.CreateDisk(scsiController, dsObj.Reference(), diskPath)
unitNumber, err := getNextUnitNumber(vmDevices, scsiController)
if err != nil {
klog.Errorf("Cannot attach disk to VM, unitNumber limit reached - %+v.", err)
return nil, nil, err
}
*disk.UnitNumber = unitNumber
backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)
if volumeOptions.CapacityKB != 0 {
disk.CapacityInKB = int64(volumeOptions.CapacityKB)
}
if volumeOptions.DiskFormat != "" {
var diskFormat string
diskFormat = DiskFormatValidType[volumeOptions.DiskFormat]
switch diskFormat {
case ThinDiskType:
backing.ThinProvisioned = types.NewBool(true)
case EagerZeroedThickDiskType:
backing.EagerlyScrub = types.NewBool(true)
default:
backing.ThinProvisioned = types.NewBool(false)
}
}
return disk, newSCSIController, nil
}
// GetVirtualDiskPath gets the first available virtual disk devicePath from the VM
func (vm *VirtualMachine) GetVirtualDiskPath(ctx context.Context) (string, error) {
vmDevices, err := vm.Device(ctx)
if err != nil {
klog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
return "", err
}
// filter vm devices to retrieve device for the given vmdk file identified by disk path
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
return backing.FileName, nil
}
}
}
return "", nil
}
// createAndAttachSCSIController creates and attaches a new SCSI controller to the VM.
func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, diskControllerType string) (types.BaseVirtualDevice, error) {
// Get VM device list
vmDevices, err := vm.Device(ctx)
if err != nil {
klog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
allSCSIControllers := getSCSIControllers(vmDevices)
if len(allSCSIControllers) >= SCSIControllerLimit {
// we reached the maximum number of controllers we can attach
klog.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
}
newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType)
if err != nil {
klog.Errorf("Failed to create new SCSI controller on VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
hotAndRemove := true
configNewSCSIController.HotAddRemove = &hotAndRemove
configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing)
// add the scsi controller to virtual machine
err = vm.AddDevice(context.TODO(), newSCSIController)
if err != nil {
klog.V(LogLevel).Infof("Cannot add SCSI controller to VM: %q. err: %+v", vm.InventoryPath, err)
// attempt clean up of scsi controller
nestedErr := vm.deleteController(ctx, newSCSIController, vmDevices)
if nestedErr != nil {
return nil, fmt.Errorf("failed to delete SCSI controller after failing to add it to vm with err='%v': %v", err, nestedErr)
}
return nil, err
}
return newSCSIController, nil
}
// getVirtualDeviceByPath gets the virtual device by path
func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath string) (types.BaseVirtualDevice, error) {
vmDevices, err := vm.Device(ctx)
if err != nil {
klog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
// filter vm devices to retrieve device for the given vmdk file identified by disk path
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
klog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
return device, nil
}
}
}
}
return nil, nil
}
func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
fileExt := ".vmdk"
diskPath = strings.TrimSuffix(diskPath, fileExt)
volPath = strings.TrimSuffix(volPath, fileExt)
return diskPath == volPath
}
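// Illustrative sketch (not part of the original file): matchVirtualDiskAndVolPath
// treats paths with and without the .vmdk extension as equal.
func exampleMatchVirtualDiskAndVolPath() {
	match := matchVirtualDiskAndVolPath(
		"[vsanDatastore] kubevols/volume.vmdk",
		"[vsanDatastore] kubevols/volume")
	fmt.Println(match) // true
}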
// deleteController removes the most recently added SCSI controller from the VM.
func (vm *VirtualMachine) deleteController(ctx context.Context, controllerDevice types.BaseVirtualDevice, vmDevices object.VirtualDeviceList) error {
controllerDeviceList := vmDevices.SelectByType(controllerDevice)
if len(controllerDeviceList) < 1 {
return ErrNoDevicesFound
}
device := controllerDeviceList[len(controllerDeviceList)-1]
err := vm.RemoveDevice(ctx, true, device)
if err != nil {
klog.Errorf("Error occurred while removing device on VM: %q. err: %+v", vm.InventoryPath, err)
return err
}
return nil
}
// RenewVM renews this virtual machine with new client connection.
func (vm *VirtualMachine) RenewVM(client *vim25.Client) VirtualMachine {
dc := Datacenter{Datacenter: object.NewDatacenter(client, vm.Datacenter.Reference())}
newVM := object.NewVirtualMachine(client, vm.VirtualMachine.Reference())
return VirtualMachine{VirtualMachine: newVM, Datacenter: &dc}
}

View File

@@ -1,144 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"context"
"testing"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/simulator"
)
func TestVirtualMachine(t *testing.T) {
ctx := context.Background()
model := simulator.VPX()
defer model.Remove()
err := model.Create()
if err != nil {
t.Fatal(err)
}
s := model.Service.NewServer()
defer s.Close()
c, err := govmomi.NewClient(ctx, s.URL, true)
if err != nil {
t.Fatal(err)
}
vc := &VSphereConnection{Client: c.Client}
dc, err := GetDatacenter(ctx, vc, TestDefaultDatacenter)
if err != nil {
t.Error(err)
}
folders, err := dc.Folders(ctx)
if err != nil {
t.Fatal(err)
}
folder, err := dc.GetFolderByPath(ctx, folders.VmFolder.InventoryPath)
if err != nil {
t.Fatal(err)
}
vms, err := folder.GetVirtualMachines(ctx)
if err != nil {
t.Fatal(err)
}
if len(vms) == 0 {
t.Fatal("no VMs")
}
for _, vm := range vms {
all, err := vm.GetAllAccessibleDatastores(ctx)
if err != nil {
t.Error(err)
}
if len(all) == 0 {
t.Error("no accessible datastores")
}
_, err = vm.GetResourcePool(ctx)
if err != nil {
t.Error(err)
}
diskPath, err := vm.GetVirtualDiskPath(ctx)
if err != nil {
t.Error(err)
}
options := &VolumeOptions{SCSIControllerType: PVSCSIControllerType}
for _, expect := range []bool{true, false} {
attached, err := vm.IsDiskAttached(ctx, diskPath)
if err != nil {
t.Error(err)
}
if attached != expect {
t.Errorf("attached=%t, expected=%t", attached, expect)
}
uuid, err := vm.AttachDisk(ctx, diskPath, options)
if err != nil {
t.Error(err)
}
if uuid == "" {
t.Error("missing uuid")
}
err = vm.DetachDisk(ctx, diskPath)
if err != nil {
t.Error(err)
}
}
for _, expect := range []bool{true, false} {
active, err := vm.IsActive(ctx)
if err != nil {
t.Error(err)
}
if active != expect {
t.Errorf("active=%t, expected=%t", active, expect)
}
if expect {
// Expecting to hit the error path since the VM is still powered on
err = vm.DeleteVM(ctx)
if err == nil {
t.Error("expected error")
}
_, _ = vm.PowerOff(ctx)
continue
}
// Should be able to delete now that VM power is off
err = vm.DeleteVM(ctx)
if err != nil {
t.Error(err)
}
}
}
}

View File

@@ -1,27 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"github.com/vmware/govmomi/object"
)
// VMOptions provides helper objects for provisioning a volume with an SPBM policy
type VMOptions struct {
VMFolder *Folder
VMResourcePool *object.ResourcePool
}

View File

@@ -1,108 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"strings"
"k8s.io/klog"
)
// VolumeOptions specifies various options for a volume.
type VolumeOptions struct {
CapacityKB int
Tags map[string]string
Name string
DiskFormat string
Datastore string
VSANStorageProfileData string
StoragePolicyName string
StoragePolicyID string
SCSIControllerType string
Zone []string
}
var (
// DiskFormatValidType specifies the valid disk formats
DiskFormatValidType = map[string]string{
ThinDiskType: ThinDiskType,
strings.ToLower(EagerZeroedThickDiskType): EagerZeroedThickDiskType,
strings.ToLower(ZeroedThickDiskType): PreallocatedDiskType,
}
// SCSIControllerValidType specifies the supported SCSI controllers
SCSIControllerValidType = []string{LSILogicControllerType, LSILogicSASControllerType, PVSCSIControllerType}
)
// DiskformatValidOptions generates the list of valid options for DiskFormat
func DiskformatValidOptions() string {
validopts := ""
for diskformat := range DiskFormatValidType {
validopts += diskformat + ", "
}
validopts = strings.TrimSuffix(validopts, ", ")
return validopts
}
// CheckDiskFormatSupported checks if the diskFormat is valid
func CheckDiskFormatSupported(diskFormat string) bool {
if DiskFormatValidType[diskFormat] == "" {
klog.Errorf("Not a valid Disk Format. Valid options are %+q", DiskformatValidOptions())
return false
}
return true
}
// SCSIControllerTypeValidOptions generates valid options for SCSIControllerType
func SCSIControllerTypeValidOptions() string {
validopts := ""
for _, controllerType := range SCSIControllerValidType {
validopts += (controllerType + ", ")
}
validopts = strings.TrimSuffix(validopts, ", ")
return validopts
}
// CheckControllerSupported checks if the given controller type is valid
func CheckControllerSupported(ctrlType string) bool {
for _, c := range SCSIControllerValidType {
if ctrlType == c {
return true
}
}
klog.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions())
return false
}
// VerifyVolumeOptions checks if volumeOptions.SCSIControllerType is a valid controller type
func (volumeOptions VolumeOptions) VerifyVolumeOptions() bool {
// Validate only if SCSIControllerType is set by user.
// Default value is set later in virtualDiskManager.Create and vmDiskManager.Create
if volumeOptions.SCSIControllerType != "" {
isValid := CheckControllerSupported(volumeOptions.SCSIControllerType)
if !isValid {
return false
}
}
// ThinDiskType is the default, so skip the validation.
if volumeOptions.DiskFormat != ThinDiskType {
isValid := CheckDiskFormatSupported(volumeOptions.DiskFormat)
if !isValid {
return false
}
}
return true
}
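// Illustrative sketch (not part of the original file): building a VolumeOptions
// value and validating it before provisioning. The field values are hypothetical.
func exampleVerifyVolumeOptions() {
	opts := VolumeOptions{
		CapacityKB:         1024 * 1024,
		Name:               "example-volume",
		DiskFormat:         ThinDiskType,
		SCSIControllerType: PVSCSIControllerType,
	}
	if !opts.VerifyVolumeOptions() {
		klog.Errorf("invalid volume options: %+v", opts)
	}
}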

View File

@@ -1,133 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib
import (
"time"
"github.com/prometheus/client_golang/prometheus"
)
// Cloud Provider API constants
const (
APICreateVolume = "CreateVolume"
APIDeleteVolume = "DeleteVolume"
APIAttachVolume = "AttachVolume"
APIDetachVolume = "DetachVolume"
)
// Cloud Provider Operation constants
const (
OperationDeleteVolume = "DeleteVolumeOperation"
OperationAttachVolume = "AttachVolumeOperation"
OperationDetachVolume = "DetachVolumeOperation"
OperationDiskIsAttached = "DiskIsAttachedOperation"
OperationDisksAreAttached = "DisksAreAttachedOperation"
OperationCreateVolume = "CreateVolumeOperation"
OperationCreateVolumeWithPolicy = "CreateVolumeWithPolicyOperation"
OperationCreateVolumeWithRawVSANPolicy = "CreateVolumeWithRawVSANPolicyOperation"
)
// vsphereAPIMetric is for recording latency of Single API Call.
var vsphereAPIMetric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "cloudprovider_vsphere_api_request_duration_seconds",
Help: "Latency of vsphere api call",
},
[]string{"request"},
)
var vsphereAPIErrorMetric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cloudprovider_vsphere_api_request_errors",
Help: "vsphere Api errors",
},
[]string{"request"},
)
// vsphereOperationMetric is for recording latency of vSphere Operation which invokes multiple APIs to get the task done.
var vsphereOperationMetric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "cloudprovider_vsphere_operation_duration_seconds",
Help: "Latency of vsphere operation call",
},
[]string{"operation"},
)
var vsphereOperationErrorMetric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cloudprovider_vsphere_operation_errors",
Help: "vsphere operation errors",
},
[]string{"operation"},
)
// RegisterMetrics registers all the API and Operation metrics
func RegisterMetrics() {
prometheus.MustRegister(vsphereAPIMetric)
prometheus.MustRegister(vsphereAPIErrorMetric)
prometheus.MustRegister(vsphereOperationMetric)
prometheus.MustRegister(vsphereOperationErrorMetric)
}
// RecordvSphereMetric records the vSphere API and Operation metrics
func RecordvSphereMetric(actionName string, requestTime time.Time, err error) {
switch actionName {
case APICreateVolume, APIDeleteVolume, APIAttachVolume, APIDetachVolume:
recordvSphereAPIMetric(actionName, requestTime, err)
default:
recordvSphereOperationMetric(actionName, requestTime, err)
}
}
func recordvSphereAPIMetric(actionName string, requestTime time.Time, err error) {
if err != nil {
vsphereAPIErrorMetric.With(prometheus.Labels{"request": actionName}).Inc()
} else {
vsphereAPIMetric.With(prometheus.Labels{"request": actionName}).Observe(calculateTimeTaken(requestTime))
}
}
func recordvSphereOperationMetric(actionName string, requestTime time.Time, err error) {
if err != nil {
vsphereOperationErrorMetric.With(prometheus.Labels{"operation": actionName}).Inc()
} else {
vsphereOperationMetric.With(prometheus.Labels{"operation": actionName}).Observe(calculateTimeTaken(requestTime))
}
}
// RecordCreateVolumeMetric records the Create Volume metric
func RecordCreateVolumeMetric(volumeOptions *VolumeOptions, requestTime time.Time, err error) {
var actionName string
if volumeOptions.StoragePolicyName != "" {
actionName = OperationCreateVolumeWithPolicy
} else if volumeOptions.VSANStorageProfileData != "" {
actionName = OperationCreateVolumeWithRawVSANPolicy
} else {
actionName = OperationCreateVolume
}
RecordvSphereMetric(actionName, requestTime, err)
}
func calculateTimeTaken(requestBeginTime time.Time) (timeTaken float64) {
if !requestBeginTime.IsZero() {
timeTaken = time.Since(requestBeginTime).Seconds()
} else {
timeTaken = 0
}
return timeTaken
}
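// Illustrative sketch (not part of the original file): wrapping a hypothetical
// API call with RecordvSphereMetric so that its latency, or its error, is recorded.
func exampleRecordMetric(doAttach func() error) {
	requestTime := time.Now()
	err := doAttach()
	RecordvSphereMetric(APIAttachVolume, requestTime, err)
}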

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,700 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
)
const (
DatastoreProperty = "datastore"
DatastoreInfoProperty = "info"
Folder = "Folder"
VirtualMachine = "VirtualMachine"
DummyDiskName = "kube-dummyDisk.vmdk"
ProviderPrefix = "vsphere://"
vSphereConfFileEnvVar = "VSPHERE_CONF_FILE"
UUIDPrefix = "VMware-"
)
// GetVSphere reads vSphere configuration from system environment and construct vSphere object
func GetVSphere() (*VSphere, error) {
cfg, err := getVSphereConfig()
if err != nil {
return nil, err
}
vs, err := newControllerNode(*cfg)
if err != nil {
return nil, err
}
return vs, nil
}
func getVSphereConfig() (*VSphereConfig, error) {
confFileLocation := os.Getenv(vSphereConfFileEnvVar)
if confFileLocation == "" {
return nil, fmt.Errorf("Env variable 'VSPHERE_CONF_FILE' is not set.")
}
confFile, err := os.Open(confFileLocation)
if err != nil {
return nil, err
}
defer func() {
if err := confFile.Close(); err != nil {
klog.Errorf("failed to close config file: %v", err)
}
}()
cfg, err := readConfig(confFile)
if err != nil {
return nil, err
}
return &cfg, nil
}
// Returns the accessible datastores for the given node VM.
func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
accessibleDatastores, err := nodeVmDetail.vm.GetAllAccessibleDatastores(ctx)
if err != nil {
// Check if the node VM is not found which indicates that the node info in the node manager is stale.
// If so, rediscover the node and retry.
if vclib.IsManagedObjectNotFoundError(err) {
klog.V(4).Infof("error %q ManagedObjectNotFound for node %q. Rediscovering...", err, nodeVmDetail.NodeName)
err = nodeManager.RediscoverNode(convertToK8sType(nodeVmDetail.NodeName))
if err == nil {
klog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName)
nodeInfo, err := nodeManager.GetNodeInfo(convertToK8sType(nodeVmDetail.NodeName))
if err != nil {
klog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail)
return nil, err
}
accessibleDatastores, err = nodeInfo.vm.GetAllAccessibleDatastores(ctx)
if err != nil {
klog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail)
return nil, err
}
} else {
klog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail)
return nil, err
}
} else {
klog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail)
return nil, err
}
}
return accessibleDatastores, nil
}
// Get all datastores accessible for the virtual machine object.
func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
nodeVmDetails, err := nodeManager.GetNodeDetails()
if err != nil {
klog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
return nil, err
}
if len(nodeVmDetails) == 0 {
msg := fmt.Sprintf("Kubernetes node nodeVmDetail details is empty. nodeVmDetails : %+v", nodeVmDetails)
klog.Error(msg)
return nil, fmt.Errorf(msg)
}
var sharedDatastores []*vclib.DatastoreInfo
for _, nodeVmDetail := range nodeVmDetails {
klog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName)
accessibleDatastores, err := getAccessibleDatastores(ctx, &nodeVmDetail, nodeManager)
if err != nil {
if err == vclib.ErrNoVMFound {
klog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName)
continue
}
return nil, err
}
if len(sharedDatastores) == 0 {
sharedDatastores = accessibleDatastores
} else {
sharedDatastores = intersect(sharedDatastores, accessibleDatastores)
if len(sharedDatastores) == 0 {
return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster for nodeVmDetails: %+v", nodeVmDetails)
}
}
}
klog.V(9).Infof("sharedDatastores : %+v", sharedDatastores)
sharedDatastores, err = getDatastoresForEndpointVC(ctx, dc, sharedDatastores)
if err != nil {
klog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err)
return nil, err
}
klog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores)
return sharedDatastores, nil
}
func intersect(list1 []*vclib.DatastoreInfo, list2 []*vclib.DatastoreInfo) []*vclib.DatastoreInfo {
klog.V(9).Infof("list1: %+v", list1)
klog.V(9).Infof("list2: %+v", list2)
var sharedDs []*vclib.DatastoreInfo
for _, val1 := range list1 {
// Check if val1 is found in list2
for _, val2 := range list2 {
// Intersection is performed based on the datastoreUrl as this uniquely identifies the datastore.
if val1.Info.Url == val2.Info.Url {
sharedDs = append(sharedDs, val1)
break
}
}
}
return sharedDs
}
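// Illustrative sketch (not part of the original file): intersect keeps only the
// datastores whose URLs appear in both lists. The input slices are assumed to
// have been populated from the property collector.
func exampleIntersect(listA, listB []*vclib.DatastoreInfo) {
	shared := intersect(listA, listB)
	for _, ds := range shared {
		klog.V(4).Infof("shared datastore URL: %s", ds.Info.Url)
	}
}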
// getMostFreeDatastoreName returns the name of the compatible datastore with the most free space.
func getMostFreeDatastoreName(ctx context.Context, client *vim25.Client, dsInfoList []*vclib.DatastoreInfo) (string, error) {
var curMax int64
curMax = -1
var index int
for i, dsInfo := range dsInfoList {
dsFreeSpace := dsInfo.Info.GetDatastoreInfo().FreeSpace
if dsFreeSpace > curMax {
curMax = dsFreeSpace
index = i
}
}
return dsInfoList[index].Info.GetDatastoreInfo().Name, nil
}
// Returns the datastores in the given datacenter by performing lookup based on datastore URL.
func getDatastoresForEndpointVC(ctx context.Context, dc *vclib.Datacenter, sharedDsInfos []*vclib.DatastoreInfo) ([]*vclib.DatastoreInfo, error) {
var datastores []*vclib.DatastoreInfo
allDsInfoMap, err := dc.GetAllDatastores(ctx)
if err != nil {
return nil, err
}
for _, sharedDsInfo := range sharedDsInfos {
dsInfo, ok := allDsInfoMap[sharedDsInfo.Info.Url]
if ok {
datastores = append(datastores, dsInfo)
} else {
klog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url)
}
}
klog.V(9).Infof("Datastore from endpoint VC: %+v", datastores)
return datastores, nil
}
func getPbmCompatibleDatastore(ctx context.Context, dc *vclib.Datacenter, storagePolicyName string, nodeManager *NodeManager) (string, error) {
pbmClient, err := vclib.NewPbmClient(ctx, dc.Client())
if err != nil {
return "", err
}
storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName)
if err != nil {
klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err)
return "", err
}
sharedDs, err := getSharedDatastoresInK8SCluster(ctx, dc, nodeManager)
if err != nil {
klog.Errorf("Failed to get shared datastores. err: %+v", err)
return "", err
}
if len(sharedDs) == 0 {
msg := "No shared datastores found in the endpoint virtual center"
klog.Errorf(msg)
return "", errors.New(msg)
}
compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, sharedDs)
if err != nil {
klog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v",
sharedDs, storagePolicyID, err)
return "", err
}
klog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores)
datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores)
if err != nil {
klog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err)
return "", err
}
klog.V(4).Infof("Most free datastore : %+s", datastore)
return datastore, err
}
func getDatastoresForZone(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager, selectedZones []string) ([]*vclib.DatastoreInfo, error) {
var sharedDatastores []*vclib.DatastoreInfo
for _, zone := range selectedZones {
var sharedDatastoresPerZone []*vclib.DatastoreInfo
hosts, err := nodeManager.GetHostsInZone(ctx, zone)
if err != nil {
return nil, err
}
klog.V(4).Infof("Hosts in zone %s : %s", zone, hosts)
for _, host := range hosts {
var hostSystemMo mo.HostSystem
err = host.Properties(ctx, host.Reference(), []string{"datastore"}, &hostSystemMo)
if err != nil {
klog.Errorf("Failed to get datastore property for host %s. err : %+v", host, err)
return nil, err
}
klog.V(4).Infof("Datastores mounted on host %s : %s", host, hostSystemMo.Datastore)
var dsRefList []types.ManagedObjectReference
for _, dsRef := range hostSystemMo.Datastore {
dsRefList = append(dsRefList, dsRef)
}
var dsMoList []mo.Datastore
pc := property.DefaultCollector(host.Client())
properties := []string{DatastoreInfoProperty}
err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList)
if err != nil {
klog.Errorf("Failed to get Datastore managed objects from datastore objects."+
" dsObjList: %+v, properties: %+v, err: %+v", dsRefList, properties, err)
return nil, err
}
klog.V(9).Infof("Datastore mo details: %+v", dsMoList)
var dsObjList []*vclib.DatastoreInfo
for _, dsMo := range dsMoList {
dsObjList = append(dsObjList,
&vclib.DatastoreInfo{
Datastore: &vclib.Datastore{Datastore: object.NewDatastore(host.Client(), dsMo.Reference()),
Datacenter: nil},
Info: dsMo.Info.GetDatastoreInfo()})
}
klog.V(9).Infof("DatastoreInfo details : %s", dsObjList)
if len(sharedDatastoresPerZone) == 0 {
sharedDatastoresPerZone = dsObjList
} else {
sharedDatastoresPerZone = intersect(sharedDatastoresPerZone, dsObjList)
}
klog.V(9).Infof("Shared datastore list after processing host %s : %s", host, sharedDatastoresPerZone)
}
klog.V(4).Infof("Shared datastore per zone %s is %s", zone, sharedDatastoresPerZone)
if len(sharedDatastores) == 0 {
sharedDatastores = sharedDatastoresPerZone
} else {
sharedDatastores = intersect(sharedDatastores, sharedDatastoresPerZone)
}
}
klog.V(1).Infof("Returning selected datastores : %s", sharedDatastores)
return sharedDatastores, nil
}
func getPbmCompatibleZonedDatastore(ctx context.Context, dc *vclib.Datacenter, storagePolicyName string, zonedDatastores []*vclib.DatastoreInfo) (string, error) {
pbmClient, err := vclib.NewPbmClient(ctx, dc.Client())
if err != nil {
return "", err
}
storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName)
if err != nil {
klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err)
return "", err
}
compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, zonedDatastores)
if err != nil {
klog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v",
zonedDatastores, storagePolicyID, err)
return "", err
}
klog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores)
datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores)
if err != nil {
klog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err)
return "", err
}
klog.V(4).Infof("Most free datastore : %+s", datastore)
return datastore, err
}
func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter, resourcePoolPath string) (*vclib.VMOptions, error) {
var vmOptions vclib.VMOptions
resourcePool, err := dc.GetResourcePool(ctx, resourcePoolPath)
if err != nil {
return nil, err
}
klog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool)
folder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
if err != nil {
return nil, err
}
vmOptions.VMFolder = folder
vmOptions.VMResourcePool = resourcePool
return &vmOptions, nil
}
// A background routine which is responsible for deleting stale dummy VMs.
func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for {
time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute)
vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
if err != nil {
klog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err)
continue
}
dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
if err != nil {
klog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err)
continue
}
// Get the folder reference for global working directory where the dummy VM needs to be created.
vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
if err != nil {
klog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err)
continue
}
// A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests.
cleanUpDummyVMs := func() {
cleanUpDummyVMLock.Lock()
defer cleanUpDummyVMLock.Unlock()
err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc)
if err != nil {
klog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err)
}
}
cleanUpDummyVMs()
}
}
// Get the canonical volume path for the given volume path.
// Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk
// Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be same as volume Path.
func getcanonicalVolumePath(ctx context.Context, dc *vclib.Datacenter, volumePath string) (string, error) {
var folderID string
var folderExists bool
canonicalVolumePath := volumePath
dsPathObj, err := vclib.GetDatastorePathObjFromVMDiskPath(volumePath)
if err != nil {
return "", err
}
dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
if len(dsPath) <= 1 {
return canonicalVolumePath, nil
}
datastore := dsPathObj.Datastore
dsFolder := dsPath[0]
folderNameIDMap, datastoreExists := datastoreFolderIDMap[datastore]
if datastoreExists {
folderID, folderExists = folderNameIDMap[dsFolder]
}
// Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap
if !datastoreExists || !folderExists {
if !vclib.IsValidUUID(dsFolder) {
dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
// Query a non-existent dummy disk on the datastore folder.
// The call fails and returns the folder ID in the error message.
_, err := dc.GetVirtualDiskPage83Data(ctx, dummyDiskVolPath)
canonicalVolumePath, err = getPathFromFileNotFound(err)
if err != nil {
return "", fmt.Errorf("failed to get path from dummy request: %v", err)
}
}
diskPath := vclib.GetPathFromVMDiskPath(canonicalVolumePath)
if diskPath == "" {
return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath)
}
folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
setdatastoreFolderIDMap(datastoreFolderIDMap, datastore, dsFolder, folderID)
}
canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
return canonicalVolumePath, nil
}
// getPathFromFileNotFound returns the path from a fileNotFound error
func getPathFromFileNotFound(err error) (string, error) {
if soap.IsSoapFault(err) {
fault := soap.ToSoapFault(err)
f, ok := fault.VimFault().(types.FileNotFound)
if !ok {
return "", fmt.Errorf("%v is not a FileNotFound error", err)
}
return f.File, nil
}
return "", fmt.Errorf("%v is not a soap fault", err)
}
func setdatastoreFolderIDMap(
datastoreFolderIDMap map[string]map[string]string,
datastore string,
folderName string,
folderID string) {
folderNameIDMap := datastoreFolderIDMap[datastore]
if folderNameIDMap == nil {
folderNameIDMap = make(map[string]string)
datastoreFolderIDMap[datastore] = folderNameIDMap
}
folderNameIDMap[folderName] = folderID
}
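// Illustrative sketch (not part of the original file): caching a datastore
// folder ID and reading it back. The datastore, folder and ID values are
// hypothetical.
func exampleFolderIDCache() {
	cache := make(map[string]map[string]string)
	setdatastoreFolderIDMap(cache, "vsanDatastore", "kubevols", "25d8b159-948c-4b73-e499-02001ad1b044")
	klog.V(4).Infof("cached folder ID: %s", cache["vsanDatastore"]["kubevols"])
}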
func convertVolPathToDevicePath(ctx context.Context, dc *vclib.Datacenter, volPath string) (string, error) {
volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath)
// Get the canonical volume path for volPath.
canonicalVolumePath, err := getcanonicalVolumePath(ctx, dc, volPath)
if err != nil {
klog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
return "", err
}
// Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
canonicalVolumePath += ".vmdk"
}
return canonicalVolumePath, nil
}
// convertVolPathsToDevicePaths removes the cluster or folder path from volPaths and converts them to canonical paths
func (vs *VSphere) convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName][]string, error) {
vmVolumes := make(map[k8stypes.NodeName][]string)
for nodeName, volPaths := range nodeVolumes {
nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
if err != nil {
return nil, err
}
_, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx)
if err != nil {
return nil, err
}
for i, volPath := range volPaths {
deviceVolPath, err := convertVolPathToDevicePath(ctx, nodeInfo.dataCenter, volPath)
if err != nil {
klog.Errorf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
return nil, err
}
volPaths[i] = deviceVolPath
}
vmVolumes[nodeName] = volPaths
}
return vmVolumes, nil
}
// checkDiskAttached verifies that volumes are attached to the VMs which are in the same vCenter and datacenter.
// It returns the nodes, if any, for which the VM is not found in that vCenter and datacenter.
func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeName, nodeVolumes map[k8stypes.NodeName][]string, attached map[string]map[string]bool, retry bool) ([]k8stypes.NodeName, error) {
var nodesToRetry []k8stypes.NodeName
var vmList []*vclib.VirtualMachine
var nodeInfo NodeInfo
var err error
for _, nodeName := range nodes {
nodeInfo, err = vs.nodeManager.GetNodeInfo(nodeName)
if err != nil {
return nodesToRetry, err
}
vmList = append(vmList, nodeInfo.vm)
}
// Making sure session is valid
_, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx)
if err != nil {
return nodesToRetry, err
}
// If any of the nodes are not present, the property collector query will fail for the entire operation
vmMoList, err := nodeInfo.dataCenter.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name", "config.uuid"})
if err != nil {
if vclib.IsManagedObjectNotFoundError(err) && !retry {
klog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList)
// Property Collector Query failed
// VerifyVolumePaths per VM
for _, nodeName := range nodes {
nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
if err != nil {
return nodesToRetry, err
}
devices, err := nodeInfo.vm.VirtualMachine.Device(ctx)
if err != nil {
if vclib.IsManagedObjectNotFoundError(err) {
klog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm)
nodesToRetry = append(nodesToRetry, nodeName)
continue
}
return nodesToRetry, err
}
klog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm)
vclib.VerifyVolumePathsForVMDevices(devices, nodeVolumes[nodeName], convertToString(nodeName), attached)
}
}
return nodesToRetry, err
}
vmMoMap := make(map[string]mo.VirtualMachine)
for _, vmMo := range vmMoList {
if vmMo.Config == nil {
klog.Errorf("Config is not available for VM: %q", vmMo.Name)
continue
}
klog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid))
vmMoMap[strings.ToLower(vmMo.Config.Uuid)] = vmMo
}
klog.V(9).Infof("vmMoMap: +%v", vmMoMap)
for _, nodeName := range nodes {
node, err := vs.nodeManager.GetNode(nodeName)
if err != nil {
return nodesToRetry, err
}
nodeUUID, err := GetNodeUUID(&node)
if err != nil {
klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err)
return nodesToRetry, err
}
nodeUUID = strings.ToLower(nodeUUID)
klog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap)
vclib.VerifyVolumePathsForVM(vmMoMap[nodeUUID], nodeVolumes[nodeName], convertToString(nodeName), attached)
}
return nodesToRetry, nil
}
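// exampleVerifyAttachedWithRetry is an illustrative sketch, not part of the original
// file, of how a caller can drive checkDiskAttached: a first pass over all nodes,
// then a final pass (retry=true) restricted to the nodes whose VMs the property
// collector could not resolve. A production caller would typically also refresh the
// node manager's VM information between the two passes; that step is omitted here.
func (vs *VSphere) exampleVerifyAttachedWithRetry(ctx context.Context, nodes []k8stypes.NodeName, nodeVolumes map[k8stypes.NodeName][]string) (map[string]map[string]bool, error) {
	attached := make(map[string]map[string]bool)
	nodesToRetry, err := vs.checkDiskAttached(ctx, nodes, nodeVolumes, attached, false)
	if err != nil && len(nodesToRetry) == 0 {
		// Errors without retry candidates (e.g. node lookup failures) are fatal.
		return nil, err
	}
	if len(nodesToRetry) != 0 {
		// With retry=true a ManagedObjectNotFound error is no longer downgraded to a
		// retry, so any remaining failure is surfaced to the caller.
		if nodesToRetry, err = vs.checkDiskAttached(ctx, nodesToRetry, nodeVolumes, attached, true); err != nil || len(nodesToRetry) != 0 {
			return nil, fmt.Errorf("failed to verify attach state for nodes %+v: %v", nodesToRetry, err)
		}
	}
	return attached, nil
}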
// IsDummyVMPresent returns whether a VM with the given name exists in the configured working folder.
func (vs *VSphere) IsDummyVMPresent(vmName string) (bool, error) {
isDummyVMPresent := false
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
if err != nil {
return isDummyVMPresent, err
}
dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
if err != nil {
return isDummyVMPresent, err
}
vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
if err != nil {
return isDummyVMPresent, err
}
vms, err := vmFolder.GetVirtualMachines(ctx)
if err != nil {
return isDummyVMPresent, err
}
for _, vm := range vms {
if vm.Name() == vmName {
isDummyVMPresent = true
break
}
}
return isDummyVMPresent, nil
}
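// exampleEnsureDummyVM is an illustrative sketch, not part of the original file: a
// caller that relies on a helper ("dummy") VM can use IsDummyVMPresent as a cheap
// existence check before attempting to create one in the working folder. The VM
// name is hypothetical and creation itself is out of scope for the sketch.
func (vs *VSphere) exampleEnsureDummyVM(vmName string) error {
	present, err := vs.IsDummyVMPresent(vmName)
	if err != nil {
		return err
	}
	if !present {
		klog.V(4).Infof("dummy VM %q not found in folder %s", vmName, vs.cfg.Workspace.Folder)
	}
	return nil
}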
// GetNodeNameFromProviderID returns the Kubernetes node name for the given provider ID.
func (vs *VSphere) GetNodeNameFromProviderID(providerID string) (string, error) {
var nodeName string
nodes, err := vs.nodeManager.GetNodeDetails()
if err != nil {
klog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
return "", err
}
for _, node := range nodes {
// ProviderID is UUID for nodes v1.9.3+
if node.VMUUID == GetUUIDFromProviderID(providerID) || node.NodeName == providerID {
nodeName = node.NodeName
break
}
}
if nodeName == "" {
msg := fmt.Sprintf("Error while obtaining Kubernetes nodename for providerID %s.", providerID)
return "", errors.New(msg)
}
return nodeName, nil
}
// GetUUIDFromProviderID strips the provider prefix from the given provider ID and returns the VM UUID.
func GetUUIDFromProviderID(providerID string) string {
return strings.TrimPrefix(providerID, ProviderPrefix)
}
// IsUUIDSupportedNode returns true if the node's kubelet is older than v1.9.4 and therefore predates UUID-based provider IDs.
func IsUUIDSupportedNode(node *v1.Node) (bool, error) {
newVersion, err := version.ParseSemantic("v1.9.4")
if err != nil {
klog.Errorf("Failed to determine whether node %+v is old with error %v", node, err)
return false, err
}
nodeVersion, err := version.ParseSemantic(node.Status.NodeInfo.KubeletVersion)
if err != nil {
klog.Errorf("Failed to determine whether node %+v is old with error %v", node, err)
return false, err
}
if nodeVersion.LessThan(newVersion) {
return true, nil
}
return false, nil
}
// GetNodeUUID returns the VM UUID for the given node: the system UUID for nodes that predate UUID-based provider IDs, otherwise the UUID extracted from the provider ID.
func GetNodeUUID(node *v1.Node) (string, error) {
oldNode, err := IsUUIDSupportedNode(node)
if err != nil {
klog.Errorf("Failed to get node UUID for node %+v with error %v", node, err)
return "", err
}
if oldNode {
return node.Status.NodeInfo.SystemUUID, nil
}
return GetUUIDFromProviderID(node.Spec.ProviderID), nil
}
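// exampleNodeUUIDLookup is an illustrative sketch, not part of the original file,
// showing the two branches GetNodeUUID can take: a node whose kubelet predates
// v1.9.4 resolves to its SMBIOS system UUID, while a newer node resolves to the
// UUID embedded in its provider ID. All field values below are hypothetical.
func exampleNodeUUIDLookup() {
	oldNode := &v1.Node{}
	oldNode.Status.NodeInfo.KubeletVersion = "v1.9.3"
	oldNode.Status.NodeInfo.SystemUUID = "4237ab12-34cd-56ef-7890-abcdef123456"

	newNode := &v1.Node{}
	newNode.Status.NodeInfo.KubeletVersion = "v1.14.0"
	newNode.Spec.ProviderID = ProviderPrefix + "4237ab12-34cd-56ef-7890-abcdef123456"

	for _, node := range []*v1.Node{oldNode, newNode} {
		uuid, err := GetNodeUUID(node)
		if err != nil {
			klog.Errorf("Failed to resolve node UUID. err: %+v", err)
			continue
		}
		klog.V(4).Infof("node resolves to UUID %s", uuid)
	}
}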
// GetVMUUID reads the local VM's BIOS serial number and normalizes it into the canonical UUID form.
func GetVMUUID() (string, error) {
uuidFromFile, err := getRawUUID()
if err != nil {
return "", fmt.Errorf("error retrieving vm uuid: %s", err)
}
// Strip leading and trailing whitespace, including the trailing newline.
uuid := strings.TrimSpace(uuidFromFile)
// Check that the UUID starts with the "VMware-" prefix.
if !strings.HasPrefix(uuid, UUIDPrefix) {
return "", fmt.Errorf("Failed to match Prefix, UUID read from the file is %v", uuidFromFile)
}
// Strip the prefix, spaces, and dashes.
uuid = strings.Replace(uuid[len(UUIDPrefix):(len(uuid))], " ", "", -1)
uuid = strings.Replace(uuid, "-", "", -1)
if len(uuid) != 32 {
return "", fmt.Errorf("Length check failed, UUID read from the file is %v", uuidFromFile)
}
// Re-insert dashes to form the canonical UUID, e.g. "564d395e-d807-e18a-cb25-b79f65eb2b9f".
uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32])
return uuid, nil
}
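// exampleUUIDNormalization is an illustrative sketch, not part of the original file,
// that walks through the string transformation GetVMUUID performs. The raw serial
// below mimics the "VMware-" prefixed, space- and dash-separated form typically
// reported by the platform and is an assumption, not a value read from a real VM.
func exampleUUIDNormalization() {
	raw := "VMware-56 4d 39 5e d8 07 e1 8a-cb 25 b7 9f 65 eb 2b 9f\n"
	uuid := strings.TrimSpace(raw)                              // drop the trailing newline
	uuid = strings.Replace(uuid[len(UUIDPrefix):], " ", "", -1) // strip prefix and spaces
	uuid = strings.Replace(uuid, "-", "", -1)                   // 32 hex characters remain
	uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32])
	// uuid is now "564d395e-d807-e18a-cb25-b79f65eb2b9f".
	klog.V(4).Infof("normalized UUID: %s", uuid)
}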

View File

@@ -1,33 +0,0 @@
// +build linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"io/ioutil"
)
// UUIDPath is the sysfs file that exposes the VM's BIOS serial number on Linux guests.
const UUIDPath = "/sys/class/dmi/id/product_serial"
func getRawUUID() (string, error) {
id, err := ioutil.ReadFile(UUIDPath)
if err != nil {
return "", err
}
return string(id), nil
}

View File

@@ -1,72 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"fmt"
"testing"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/simulator"
)
func TestGetPathFromFileNotFound(t *testing.T) {
ctx := context.Background()
// vCenter model + initial set of objects (cluster, hosts, VMs, network, datastore, etc)
model := simulator.VPX()
defer model.Remove()
err := model.Create()
if err != nil {
t.Fatal(err)
}
s := model.Service.NewServer()
defer s.Close()
c, err := govmomi.NewClient(ctx, s.URL, true)
if err != nil {
t.Fatal(err)
}
vc := &vclib.VSphereConnection{Client: c.Client}
dc, err := vclib.GetDatacenter(ctx, vc, vclib.TestDefaultDatacenter)
if err != nil {
t.Errorf("failed to get datacenter: %v", err)
}
requestDiskPath := fmt.Sprintf("[%s] %s", vclib.TestDefaultDatastore, DummyDiskName)
_, err = dc.GetVirtualDiskPage83Data(ctx, requestDiskPath)
if err == nil {
t.Error("expected error when calling GetVirtualDiskPage83Data")
}
_, err = getPathFromFileNotFound(err)
if err != nil {
t.Errorf("expected err to be nil but was %v", err)
}
_, err = getPathFromFileNotFound(nil)
if err == nil {
t.Errorf("expected err when calling getPathFromFileNotFound with nil err")
}
}
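// newSimulatedDatacenter is an illustrative sketch, not part of the original file,
// that factors the simulator bootstrap used above into a reusable helper returning
// the datacenter together with a cleanup function the caller should defer.
func newSimulatedDatacenter(ctx context.Context, t *testing.T) (*vclib.Datacenter, func()) {
	model := simulator.VPX()
	if err := model.Create(); err != nil {
		t.Fatal(err)
	}
	s := model.Service.NewServer()
	c, err := govmomi.NewClient(ctx, s.URL, true)
	if err != nil {
		s.Close()
		model.Remove()
		t.Fatal(err)
	}
	vc := &vclib.VSphereConnection{Client: c.Client}
	dc, err := vclib.GetDatacenter(ctx, vc, vclib.TestDefaultDatacenter)
	if err != nil {
		s.Close()
		model.Remove()
		t.Fatal(err)
	}
	return dc, func() {
		s.Close()
		model.Remove()
	}
}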

View File

@@ -1,44 +0,0 @@
// +build windows
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"os/exec"
"strings"
)
func getRawUUID() (string, error) {
result, err := exec.Command("wmic", "bios", "get", "serialnumber").Output()
if err != nil {
return "", err
}
lines := strings.FieldsFunc(string(result), func(r rune) bool {
switch r {
case '\n', '\r':
return true
default:
return false
}
})
if len(lines) != 2 {
return "", fmt.Errorf("received unexpected value retrieving vm uuid: %q", string(result))
}
return lines[1], nil
}
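// exampleParseWmicOutput is an illustrative sketch, not part of the original file,
// that applies the same parsing as getRawUUID to a captured string instead of a
// live "wmic" invocation. The sample output (a SerialNumber header line followed by
// the value) is an assumption about typical wmic formatting.
func exampleParseWmicOutput() (string, error) {
	sample := "SerialNumber  \r\nVMware-56 4d 39 5e d8 07 e1 8a-cb 25 b7 9f 65 eb 2b 9f  \r\n"
	lines := strings.FieldsFunc(sample, func(r rune) bool {
		return r == '\n' || r == '\r'
	})
	if len(lines) != 2 {
		return "", fmt.Errorf("received unexpected value retrieving vm uuid: %q", sample)
	}
	// lines[1] holds the raw serial; GetVMUUID (vsphere_util.go) normalizes it further.
	return lines[1], nil
}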