Remove references to openstack and cinder

Signed-off-by: Davanum Srinivas <davanum@gmail.com>
This commit is contained in:
Davanum Srinivas
2022-08-08 16:01:59 -04:00
parent d206d7f0a6
commit 9bbf01bae9
78 changed files with 22 additions and 9287 deletions

View File

@@ -31,8 +31,8 @@ limitations under the License.
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* 2) With server outside of Kubernetes
 * Appropriate server must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume,
* and checks, that Kubernetes can use it as a volume.
*/

View File

@@ -56,7 +56,6 @@ import (
_ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
_ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
_ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
_ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
_ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
// Ensure that logging flags are part of the command line.

View File

@@ -1,34 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"k8s.io/kubernetes/test/e2e/framework"
)
func init() {
framework.RegisterProvider("openstack", newProvider)
}
func newProvider() (framework.ProviderInterface, error) {
return &Provider{}, nil
}
// Provider is a structure to handle OpenStack clouds for e2e testing
type Provider struct {
framework.NullProvider
}

View File

@@ -256,7 +256,7 @@ type CloudConfig struct {
ClusterIPRange string
ClusterTag string
Network string
ConfigFile string // for azure and openstack
ConfigFile string // for azure
NodeTag string
MasterTag string

View File

@@ -31,8 +31,8 @@ limitations under the License.
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* 2) With server outside of Kubernetes
* Appropriate server must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume,
* and checks, that Kubernetes can use it as a volume.
*/

View File

@@ -27,7 +27,7 @@ limitations under the License.
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server or cloud provider outside of Kubernetes (Cinder, GCE, AWS, Azure, ...)
* 2) With server or cloud provider outside of Kubernetes (GCE, AWS, Azure, ...)
* Appropriate server or cloud provider must exist somewhere outside
* the tested Kubernetes cluster. CreateVolume will create a new volume to be
* used in the TestSuites for inlineVolume or DynamicPV tests.

View File

@@ -27,7 +27,7 @@ limitations under the License.
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server or cloud provider outside of Kubernetes (Cinder, GCE, AWS, Azure, ...)
* 2) With server or cloud provider outside of Kubernetes (GCE, AWS, Azure, ...)
* Appropriate server or cloud provider must exist somewhere outside
* the tested Kubernetes cluster. CreateVolume will create a new volume to be
* used in the TestSuites for inlineVolume or DynamicPV tests.
@@ -38,7 +38,6 @@ package drivers
import (
"context"
"fmt"
"os/exec"
"strconv"
"strings"
"time"
@@ -1036,179 +1035,6 @@ func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*storageframework.
}, func() {}
}
// Cinder
// This driver assumes that OpenStack client tools are installed
// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
// and that the usual OpenStack authentication env. variables are set
// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
type cinderDriver struct {
driverInfo storageframework.DriverInfo
}
type cinderVolume struct {
volumeName string
volumeID string
}
var _ storageframework.TestDriver = &cinderDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &cinderDriver{}
var _ storageframework.InlineVolumeTestDriver = &cinderDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &cinderDriver{}
var _ storageframework.DynamicPVTestDriver = &cinderDriver{}
// InitCinderDriver returns cinderDriver that implements TestDriver interface
func InitCinderDriver() storageframework.TestDriver {
return &cinderDriver{
driverInfo: storageframework.DriverInfo{
Name: "cinder",
InTreePluginName: "kubernetes.io/cinder",
MaxFileSize: storageframework.FileSizeMedium,
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Gi",
},
SupportedFsType: sets.NewString(
"", // Default fsType
),
TopologyKeys: []string{v1.LabelFailureDomainBetaZone},
Capabilities: map[storageframework.Capability]bool{
storageframework.CapPersistence: true,
storageframework.CapFsGroup: true,
storageframework.CapExec: true,
storageframework.CapBlock: true,
// Cinder supports volume limits, but the test creates large
// number of volumes and times out test suites.
storageframework.CapVolumeLimits: false,
storageframework.CapTopology: true,
},
},
}
}
func (c *cinderDriver) GetDriverInfo() *storageframework.DriverInfo {
return &c.driverInfo
}
func (c *cinderDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
e2eskipper.SkipUnlessProviderIs("openstack")
}
func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
cv, ok := e2evolume.(*cinderVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
volSource := v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: cv.volumeID,
ReadOnly: readOnly,
},
}
if fsType != "" {
volSource.Cinder.FSType = fsType
}
return &volSource
}
func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
cv, ok := e2evolume.(*cinderVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
pvSource := v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: cv.volumeID,
ReadOnly: readOnly,
},
}
if fsType != "" {
pvSource.Cinder.FSType = fsType
}
return &pvSource, nil
}
func (c *cinderDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/cinder"
parameters := map[string]string{}
if fsType != "" {
parameters["fsType"] = fsType
}
ns := config.Framework.Namespace.Name
return storageframework.GetStorageClass(provisioner, parameters, nil, ns)
}
func (c *cinderDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
return &storageframework.PerTestConfig{
Driver: c,
Prefix: "cinder",
Framework: f,
}, func() {}
}
func (c *cinderDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
f := config.Framework
ns := f.Namespace
// We assume that namespace.Name is a random string
volumeName := ns.Name
ginkgo.By("creating a test Cinder volume")
output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
outputString := string(output[:])
framework.Logf("cinder output:\n%s", outputString)
framework.ExpectNoError(err)
// Parse 'id'' from stdout. Expected format:
// | attachments | [] |
// | availability_zone | nova |
// ...
// | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |
volumeID := ""
for _, line := range strings.Split(outputString, "\n") {
fields := strings.Fields(line)
if len(fields) != 5 {
continue
}
if fields[1] != "id" {
continue
}
volumeID = fields[3]
break
}
framework.Logf("Volume ID: %s", volumeID)
framework.ExpectNotEqual(volumeID, "")
return &cinderVolume{
volumeName: volumeName,
volumeID: volumeID,
}
}
func (v *cinderVolume) DeleteVolume() {
id := v.volumeID
name := v.volumeName
// Try to delete the volume for several seconds - it takes
// a while for the plugin to detach it.
var output []byte
var err error
timeout := time.Second * 120
framework.Logf("Waiting up to %v for removal of cinder volume %s / %s", timeout, id, name)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
output, err = exec.Command("cinder", "delete", id).CombinedOutput()
if err == nil {
framework.Logf("Cinder volume %s deleted", id)
return
}
framework.Logf("Failed to delete volume %s / %s: %v\n%s", id, name, err, string(output))
}
// Timed out, try to get "cinder show <volume>" output for easier debugging
showOutput, showErr := exec.Command("cinder", "show", id).CombinedOutput()
if showErr != nil {
framework.Logf("Failed to show volume %s / %s: %v\n%s", id, name, showErr, string(showOutput))
} else {
framework.Logf("Volume %s / %s:\n%s", id, name, string(showOutput))
}
framework.Failf("Failed to delete pre-provisioned volume %s / %s: %v\n%s", id, name, err, string(output[:]))
}
// GCE
type gcePdDriver struct {
driverInfo storageframework.DriverInfo

View File

@@ -37,7 +37,6 @@ var testDrivers = []func() storageframework.TestDriver{
drivers.InitHostPathDriver,
drivers.InitHostPathSymlinkDriver,
drivers.InitEmptydirDriver,
drivers.InitCinderDriver,
drivers.InitVSphereDriver,
drivers.InitAzureDiskDriver,
drivers.InitAzureFileDriver,

View File

@@ -374,8 +374,6 @@ func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *st
allocatableKey = volumeutil.EBSVolumeLimitKey
case migrationplugins.GCEPDInTreePluginName:
allocatableKey = volumeutil.GCEVolumeLimitKey
case migrationplugins.CinderInTreePluginName:
allocatableKey = volumeutil.CinderVolumeLimitKey
case migrationplugins.AzureDiskInTreePluginName:
allocatableKey = volumeutil.AzureVolumeLimitKey
default:

View File

@@ -286,34 +286,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted")
},
},
// OpenStack generic tests (works on all OpenStack deployments)
{
Name: "generic Cinder volume on OpenStack",
CloudProviders: []string{"openstack"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
{
Name: "Cinder volume with empty volume type and zone on OpenStack",
CloudProviders: []string{"openstack"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{
"type": "",
"availability": "",
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
// vSphere generic test
{
Name: "generic vSphere volume",
@@ -429,7 +401,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// not being deleted.
// NOTE: Polls until no PVs are detected, times out at 5 minutes.
e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure")
const raceAttempts int = 100
var residualPVs []*v1.PersistentVolume
@@ -605,7 +577,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.Describe("DynamicProvisioner Default", func() {
ginkgo.It("should create and delete default persistent volumes [Slow]", func() {
e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure")
e2epv.SkipIfNoDefaultStorageClass(c)
ginkgo.By("creating a claim with no annotation")
@@ -631,7 +603,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// Modifying the default storage class can be disruptive to other tests that depend on it
ginkgo.It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() {
e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure")
e2epv.SkipIfNoDefaultStorageClass(c)
scName, scErr := e2epv.GetDefaultStorageClassName(c)
@@ -670,7 +642,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// Modifying the default storage class can be disruptive to other tests that depend on it
ginkgo.It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() {
e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure")
e2epv.SkipIfNoDefaultStorageClass(c)
scName, scErr := e2epv.GetDefaultStorageClassName(c)
@@ -844,8 +816,6 @@ func getDefaultPluginName() string {
return "kubernetes.io/gce-pd"
case framework.ProviderIs("aws"):
return "kubernetes.io/aws-ebs"
case framework.ProviderIs("openstack"):
return "kubernetes.io/cinder"
case framework.ProviderIs("vsphere"):
return "kubernetes.io/vsphere-volume"
case framework.ProviderIs("azure"):

View File

@@ -47,7 +47,7 @@ const (
func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
var err error
e2eskipper.SkipUnlessProviderIs("gce", "gke", "openstack", "aws", "vsphere", "azure")
e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws", "vsphere", "azure")
ns := f.Namespace.Name

View File

@@ -52,7 +52,7 @@ func (VolumeModeDowngradeTest) Name() string {
// Skip returns true when this test can be skipped.
func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
if !framework.ProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") {
if !framework.ProviderIs("gce", "aws", "gke", "vsphere", "azure") {
return true
}