Refactor GCE wrapper library to allow execution from E2E test suite
This reverts commit 147b6911f5, reversing changes made to 6fd986065b.
This commit is contained in:
@@ -30,9 +30,12 @@ import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/gomega"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
)
|
||||
|
||||
@@ -73,6 +76,7 @@ func init() {
|
||||
flag.StringVar(&cloudConfig.MasterName, "kube-master", "", "Name of the kubernetes master. Only required if provider is gce or gke")
|
||||
flag.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
|
||||
flag.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
|
||||
flag.StringVar(&cloudConfig.ServiceAccount, "gce-service-account", "", "GCE service account to use for GCE API calls, if applicable")
|
||||
flag.StringVar(&cloudConfig.Cluster, "gke-cluster", "", "GKE name of cluster being used, if applicable")
|
||||
flag.StringVar(&cloudConfig.NodeInstanceGroup, "node-instance-group", "", "Name of the managed instance group for nodes. Valid only for gce, gke or aws")
|
||||
flag.IntVar(&cloudConfig.NumNodes, "num-nodes", -1, "Number of nodes in the cluster")
|
||||
@@ -102,6 +106,23 @@ func TestE2E(t *testing.T) {
|
||||
glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
|
||||
}
|
||||
|
||||
if testContext.Provider == "gce" || testContext.Provider == "gke" {
|
||||
var err error
|
||||
Logf("Fetching cloud provider for %q\r\n", testContext.Provider)
|
||||
var tokenSource oauth2.TokenSource
|
||||
tokenSource = nil
|
||||
if cloudConfig.ServiceAccount != "" {
|
||||
// Use specified service account for auth
|
||||
Logf("Using service account %q as token source.", cloudConfig.ServiceAccount)
|
||||
tokenSource = google.ComputeTokenSource(cloudConfig.ServiceAccount)
|
||||
}
|
||||
cloudConfig.Provider, err = gcecloud.CreateGCECloud(testContext.CloudConfig.ProjectID, testContext.CloudConfig.Zone, "" /* networkUrl */, tokenSource, false /* useMetadataServer */)
|
||||
if err != nil {
|
||||
glog.Fatal("Error building GCE provider: ", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if testContext.Provider == "aws" {
|
||||
awsConfig := "[Global]\n"
|
||||
if cloudConfig.Zone == "" {
|
||||
|
152
test/e2e/pd.go
152
test/e2e/pd.go
@@ -18,8 +18,8 @@ package e2e
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"google.golang.org/api/googleapi"
|
||||
mathrand "math/rand"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -31,6 +31,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
@@ -69,7 +70,7 @@ var _ = Describe("Pod Disks", func() {
|
||||
SkipUnlessProviderIs("gce", "gke", "aws")
|
||||
|
||||
By("creating PD")
|
||||
diskName, err := createPD()
|
||||
diskName, err := createPDWithRetry()
|
||||
expectNoError(err, "Error creating PD")
|
||||
|
||||
host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
|
||||
@@ -77,14 +78,12 @@ var _ = Describe("Pod Disks", func() {
|
||||
containerName := "mycontainer"
|
||||
|
||||
defer func() {
|
||||
By("cleaning up PD-RW test environment")
|
||||
// Teardown pods, PD. Ignore errors.
|
||||
// Teardown should do nothing unless test failed.
|
||||
By("cleaning up PD-RW test environment")
|
||||
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
|
||||
podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
|
||||
detachPD(host0Name, diskName)
|
||||
detachPD(host1Name, diskName)
|
||||
deletePDWithRetry(diskName)
|
||||
detachAndDeletePDs(diskName, []string{host0Name, host1Name})
|
||||
}()
|
||||
|
||||
By("submitting host0Pod to kubernetes")
|
||||
@@ -117,9 +116,6 @@ var _ = Describe("Pod Disks", func() {
|
||||
By("deleting host1Pod")
|
||||
expectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")
|
||||
|
||||
By(fmt.Sprintf("deleting PD %q", diskName))
|
||||
deletePDWithRetry(diskName)
|
||||
|
||||
return
|
||||
})
|
||||
|
||||
@@ -127,7 +123,7 @@ var _ = Describe("Pod Disks", func() {
|
||||
SkipUnlessProviderIs("gce", "gke")
|
||||
|
||||
By("creating PD")
|
||||
diskName, err := createPD()
|
||||
diskName, err := createPDWithRetry()
|
||||
expectNoError(err, "Error creating PD")
|
||||
|
||||
rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
|
||||
@@ -141,10 +137,7 @@ var _ = Describe("Pod Disks", func() {
|
||||
podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
|
||||
podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
|
||||
podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))
|
||||
|
||||
detachPD(host0Name, diskName)
|
||||
detachPD(host1Name, diskName)
|
||||
deletePDWithRetry(diskName)
|
||||
detachAndDeletePDs(diskName, []string{host0Name, host1Name})
|
||||
}()
|
||||
|
||||
By("submitting rwPod to ensure PD is formatted")
|
||||
@@ -171,18 +164,13 @@ var _ = Describe("Pod Disks", func() {
|
||||
|
||||
By("deleting host1ROPod")
|
||||
expectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod")
|
||||
|
||||
By(fmt.Sprintf("deleting PD %q", diskName))
|
||||
deletePDWithRetry(diskName)
|
||||
|
||||
expectNoError(err, "Error deleting PD")
|
||||
})
|
||||
|
||||
It("should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession", func() {
|
||||
SkipUnlessProviderIs("gce", "gke", "aws")
|
||||
|
||||
By("creating PD")
|
||||
diskName, err := createPD()
|
||||
diskName, err := createPDWithRetry()
|
||||
expectNoError(err, "Error creating PD")
|
||||
numContainers := 4
|
||||
|
||||
@@ -193,8 +181,7 @@ var _ = Describe("Pod Disks", func() {
|
||||
// Teardown pods, PD. Ignore errors.
|
||||
// Teardown should do nothing unless test failed.
|
||||
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
|
||||
detachPD(host0Name, diskName)
|
||||
deletePDWithRetry(diskName)
|
||||
detachAndDeletePDs(diskName, []string{host0Name})
|
||||
}()
|
||||
|
||||
fileAndContentToVerify := make(map[string]string)
|
||||
@@ -225,21 +212,16 @@ var _ = Describe("Pod Disks", func() {
|
||||
By("deleting host0Pod")
|
||||
expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("deleting PD %q", diskName))
|
||||
deletePDWithRetry(diskName)
|
||||
|
||||
return
|
||||
})
|
||||
|
||||
It("should schedule a pod w/two RW PDs both mounted to one container, write to PD, verify contents, delete pod, recreate pod, verify contents, and repeat in rapid succession", func() {
|
||||
SkipUnlessProviderIs("gce", "gke", "aws")
|
||||
|
||||
By("creating PD1")
|
||||
disk1Name, err := createPD()
|
||||
disk1Name, err := createPDWithRetry()
|
||||
expectNoError(err, "Error creating PD1")
|
||||
By("creating PD2")
|
||||
disk2Name, err := createPD()
|
||||
disk2Name, err := createPDWithRetry()
|
||||
expectNoError(err, "Error creating PD2")
|
||||
|
||||
host0Pod := testPDPod([]string{disk1Name, disk2Name}, host0Name, false /* readOnly */, 1 /* numContainers */)
|
||||
@@ -249,10 +231,8 @@ var _ = Describe("Pod Disks", func() {
|
||||
// Teardown pods, PD. Ignore errors.
|
||||
// Teardown should do nothing unless test failed.
|
||||
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
|
||||
detachPD(host0Name, disk1Name)
|
||||
detachPD(host0Name, disk2Name)
|
||||
deletePDWithRetry(disk1Name)
|
||||
deletePDWithRetry(disk2Name)
|
||||
detachAndDeletePDs(disk1Name, []string{host0Name})
|
||||
detachAndDeletePDs(disk2Name, []string{host0Name})
|
||||
}()
|
||||
|
||||
containerName := "mycontainer"
|
||||
@@ -286,16 +266,23 @@ var _ = Describe("Pod Disks", func() {
|
||||
By("deleting host0Pod")
|
||||
expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("deleting PD1 %q", disk1Name))
|
||||
deletePDWithRetry(disk1Name)
|
||||
By(fmt.Sprintf("deleting PD2 %q", disk2Name))
|
||||
deletePDWithRetry(disk2Name)
|
||||
|
||||
return
|
||||
})
|
||||
})
|
||||
|
||||
func createPDWithRetry() (string, error) {
|
||||
newDiskName := ""
|
||||
var err error
|
||||
for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
|
||||
if newDiskName, err = createPD(); err != nil {
|
||||
Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", err)
|
||||
continue
|
||||
}
|
||||
Logf("Successfully created a new PD: %q.", newDiskName)
|
||||
break
|
||||
}
|
||||
return newDiskName, err
|
||||
}
|
||||
|
||||
func deletePDWithRetry(diskName string) {
|
||||
var err error
|
||||
for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
|
||||
@@ -303,7 +290,7 @@ func deletePDWithRetry(diskName string) {
|
||||
Logf("Couldn't delete PD %q. Sleeping 5 seconds (%v)", diskName, err)
|
||||
continue
|
||||
}
|
||||
Logf("Deleted PD %v", diskName)
|
||||
Logf("Successfully deleted PD %q.", diskName)
|
||||
break
|
||||
}
|
||||
expectNoError(err, "Error deleting PD")
|
||||
@@ -325,9 +312,12 @@ func createPD() (string, error) {
|
||||
if testContext.Provider == "gce" || testContext.Provider == "gke" {
|
||||
pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID()))
|
||||
|
||||
zone := testContext.CloudConfig.Zone
|
||||
// TODO: make this hit the compute API directly instead of shelling out to gcloud.
|
||||
err := exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "disks", "create", "--zone="+zone, "--size=10GB", pdName).Run()
|
||||
gceCloud, err := getGCECloud()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = gceCloud.CreateDisk(pdName, 10 /* sizeGb */)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -345,19 +335,20 @@ func createPD() (string, error) {
|
||||
|
||||
func deletePD(pdName string) error {
|
||||
if testContext.Provider == "gce" || testContext.Provider == "gke" {
|
||||
zone := testContext.CloudConfig.Zone
|
||||
|
||||
// TODO: make this hit the compute API directly.
|
||||
cmd := exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "disks", "delete", "--zone="+zone, pdName)
|
||||
data, err := cmd.CombinedOutput()
|
||||
gceCloud, err := getGCECloud()
|
||||
if err != nil {
|
||||
dataStr := string(data)
|
||||
if strings.Contains(dataStr, "was not found") {
|
||||
Logf("PD deletion implicitly succeeded because PD %q does not exist.", pdName)
|
||||
return err
|
||||
}
|
||||
|
||||
err = gceCloud.DeleteDisk(pdName)
|
||||
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
|
||||
// PD already exists, ignore error.
|
||||
return nil
|
||||
}
|
||||
|
||||
Logf("Error deleting PD: %s (%v)", dataStr, err)
|
||||
Logf("Error deleting PD %q: %v", pdName, err)
|
||||
}
|
||||
return err
|
||||
} else {
|
||||
@@ -373,10 +364,23 @@ func detachPD(hostName, pdName string) error {
|
||||
if testContext.Provider == "gce" || testContext.Provider == "gke" {
|
||||
instanceName := strings.Split(hostName, ".")[0]
|
||||
|
||||
zone := testContext.CloudConfig.Zone
|
||||
gceCloud, err := getGCECloud()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = gceCloud.DetachDisk(pdName, instanceName)
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && strings.Contains(gerr.Message, "Invalid value for field 'disk'") {
|
||||
// PD already detached, ignore error.
|
||||
return nil
|
||||
}
|
||||
|
||||
Logf("Error detaching PD %q: %v", pdName, err)
|
||||
}
|
||||
|
||||
return err
|
||||
|
||||
// TODO: make this hit the compute API directly.
|
||||
return exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "detach-disk", "--zone="+zone, "--disk="+pdName, instanceName).Run()
|
||||
} else {
|
||||
volumes, ok := testContext.CloudConfig.Provider.(awscloud.Volumes)
|
||||
if !ok {
|
||||
@@ -457,19 +461,19 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine
|
||||
// Waits for specified PD to detach from specified hostName
|
||||
func waitForPDDetach(diskName, hostName string) error {
|
||||
if testContext.Provider == "gce" || testContext.Provider == "gke" {
|
||||
for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) {
|
||||
zone := testContext.CloudConfig.Zone
|
||||
gceCloud, err := getGCECloud()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := exec.Command("gcloud", "compute", "--project="+testContext.CloudConfig.ProjectID, "instances", "describe", "--zone="+zone, hostName)
|
||||
data, err := cmd.CombinedOutput()
|
||||
for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) {
|
||||
diskAttached, err := gceCloud.DiskIsAttached(diskName, hostName)
|
||||
if err != nil {
|
||||
Logf("Error waiting for PD %q to detach from node %q. 'gcloud compute instances describe' failed with %s (%v)", diskName, hostName, string(data), err)
|
||||
Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, hostName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
dataStr := strings.ToLower(string(data))
|
||||
diskName = strings.ToLower(diskName)
|
||||
if !strings.Contains(string(dataStr), diskName) {
|
||||
if !diskAttached {
|
||||
// Specified disk does not appear to be attached to specified node
|
||||
Logf("GCE PD %q appears to have successfully detached from %q.", diskName, hostName)
|
||||
return nil
|
||||
@@ -483,3 +487,23 @@ func waitForPDDetach(diskName, hostName string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getGCECloud() (*gcecloud.GCECloud, error) {
|
||||
gceCloud, ok := testContext.CloudConfig.Provider.(*gcecloud.GCECloud)
|
||||
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", testContext.CloudConfig.Provider)
|
||||
}
|
||||
|
||||
return gceCloud, nil
|
||||
}
|
||||
|
||||
func detachAndDeletePDs(diskName string, hosts []string) {
|
||||
for _, host := range hosts {
|
||||
detachPD(host, diskName)
|
||||
By(fmt.Sprintf("Waiting for PD %q to detach from %q", diskName, host))
|
||||
waitForPDDetach(diskName, host)
|
||||
}
|
||||
By(fmt.Sprintf("Deleting PD %q", diskName))
|
||||
deletePDWithRetry(diskName)
|
||||
}
|
||||
|
@@ -114,6 +114,7 @@ type CloudConfig struct {
|
||||
NodeInstanceGroup string
|
||||
NumNodes int
|
||||
ClusterTag string
|
||||
ServiceAccount string
|
||||
|
||||
Provider cloudprovider.Interface
|
||||
}
|
||||
|
Reference in New Issue
Block a user