e2e/storage: speed up skipping, simplify APIs and test definition

CreateDriver (now called PrepareTest) is a potentially expensive
operation, depending on the driver. Creating and tearing down a
framework instance also takes time (measured at 6 seconds on a fast
machine) and produces quite a bit of log output.

Both can be avoided for tests that skip based on static
information (such as the current OS, vendor, driver, and test
pattern) by making the test suite responsible for creating the
framework and driver, as sketched below.
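
A minimal sketch of the mechanism, with simplified names (this is not
the literal code added by this commit): Ginkgo runs BeforeEach blocks
in registration order, and a skip aborts all later ones, so registering
the cheap check before framework.NewDefaultFramework means a skipped
test never creates a namespace or deploys a driver:

    // sketch: names simplified, not the code added by this commit
    func defineTestsFor(driver testsuites.TestDriver, pattern testpatterns.TestPattern, testName string) {
        Context(testName, func() {
            BeforeEach(func() {
                // Static checks only (OS, provider, capabilities, test
                // pattern). framework.Skipf here aborts the spec before
                // any later BeforeEach runs.
                driver.SkipUnsupportedTest(pattern)
            })

            // Registered after the skip check, so its per-test namespace
            // setup (the ~6 seconds measured above) is only paid by tests
            // that actually run.
            f := framework.NewDefaultFramework("storage")

            It("should work", func() {
                config, cleanup := driver.PrepareTest(f)
                defer cleanup()
                _ = config // ... test body using config ...
            })
        })
    }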

The lifecycle of the TestConfig instance was confusing because it was
stored inside the DriverInfo, a struct that is conceptually static,
while the TestConfig is dynamic. It is cleaner to separate the two,
even if that means an additional pointer must be passed into some
functions. Now PrepareTest is responsible for initializing the
PerTestConfig that is to be used by the test.
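
The resulting API, abbreviated to what this diff actually uses (the
someDriver receiver is a placeholder):

    // in the testsuites package
    type PerTestConfig struct {
        Driver             TestDriver
        Prefix             string
        Framework          *framework.Framework
        ClientNodeName     string
        ClientNodeSelector map[string]string
        ServerConfig       *framework.VolumeTestConfig
    }

    // PrepareTest replaces the CreateDriver/CleanupDriver pair: it does
    // any per-test deployment, returns a freshly initialized config for
    // exactly one test, plus a cleanup callback.
    func (d *someDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
        config := &testsuites.PerTestConfig{
            Driver:    d,
            Prefix:    "some",
            Framework: f,
        }
        // ... deploy pods, pick a node, set config.ClientNodeName ...
        return config, func() {
            // ... undeploy, stop log capture ...
        }
    }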

To make this approach simpler to implement (fewer functions need the
pointer) and the tests easier to read, the entire setup and test
definition is now contained in a single function, as is normally done
in Ginkgo. One can see at a glance where variables are set, instead of
having to trace values through two additional structs (TestResource
and TestInput).
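
For example, a dynamic-provisioning check in the new style reads top
to bottom; the type assertion and the storage-class helper match the
signatures in the diff below (sketch only):

    It("should provision storage", func() {
        config, cleanup := driver.PrepareTest(f)
        defer cleanup()

        dDriver := driver.(testsuites.DynamicPVTestDriver)
        sc := dDriver.GetDynamicProvisionStorageClass(config, "" /* fsType */)
        // ... create a claim referencing sc.Name and exercise it ...
        _ = sc
    })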

Since the API is changing anyway, some other changes are made as well:
- some function prototypes are simplified
- functions are renamed to match their purpose (tests aren't executed
  by the test suite, they only get defined for later execution, hence
  RunTestSuite becomes DefineTestSuite; see the sketch after this list)
- unused methods are removed (TestSuite.skipUnsupportedTest was redundant)
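
Driver constructors no longer take a config, and the per-driver
Context shrinks to a single DefineTestSuite call, with no
BeforeEach/AfterEach pair for resetting shared state (condensed from
the first hunk below):

    for _, initDriver := range csiTestDrivers {
        curDriver := initDriver()
        Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
            testsuites.DefineTestSuite(curDriver, csiTestSuites, csiTunePattern)
        })
    }
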
Author: Patrick Ohly
Date:   2018-12-29 17:08:34 +01:00
Parent: 1cb121d2a9
Commit: 05cc31697f

15 changed files with 1278 additions and 1516 deletions

View File

@@ -17,10 +17,8 @@ limitations under the License.
package storage
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"time"
@@ -32,7 +30,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/podlogs"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -47,9 +44,9 @@ import (
)
// List of testDrivers to be executed in below loop
var csiTestDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
var csiTestDrivers = []func() testsuites.TestDriver{
drivers.InitHostPathCSIDriver,
drivers.InitGcePDCSIDriver,
func() testsuites.TestDriver { return drivers.InitGcePDCSIDriver(false /* topology enabled */) },
drivers.InitGcePDExternalCSIDriver,
drivers.InitHostPathV0CSIDriver,
// Don't run tests with mock driver (drivers.InitMockCSIDriver), it does not provide persistent storage.
@@ -81,117 +78,56 @@ func csiTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPatt
// This executes testSuites for csi volumes.
var _ = utils.SIGDescribe("CSI Volumes", func() {
f := framework.NewDefaultFramework("csi-volumes")
var (
cancel context.CancelFunc
cs clientset.Interface
csics csiclient.Interface
ns *v1.Namespace
// Common configuration options for each driver.
config = testsuites.TestConfig{
Framework: f,
Prefix: "csi",
}
)
BeforeEach(func() {
ctx, c := context.WithCancel(context.Background())
cancel = c
cs = f.ClientSet
csics = f.CSIClientSet
ns = f.Namespace
// Debugging of the following tests heavily depends on the log output
// of the different containers. Therefore include all of that in log
// files (when using --report-dir, as in the CI) or the output stream
// (otherwise).
to := podlogs.LogOutput{
StatusWriter: GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = GinkgoWriter
} else {
test := CurrentGinkgoTestDescription()
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
reg.ReplaceAllString(test.FullTestText, "_") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
}
})
AfterEach(func() {
cancel()
})
for _, initDriver := range csiTestDrivers {
curDriver := initDriver(config)
curConfig := curDriver.GetDriverInfo().Config
curDriver := initDriver()
Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
BeforeEach(func() {
// Reset config. The driver might have modified its copy
// in a previous test.
curDriver.GetDriverInfo().Config = curConfig
// setupDriver
curDriver.CreateDriver()
})
AfterEach(func() {
// Cleanup driver
curDriver.CleanupDriver()
})
testsuites.RunTestSuite(f, curDriver, csiTestSuites, csiTunePattern)
testsuites.DefineTestSuite(curDriver, csiTestSuites, csiTunePattern)
})
}
Context("CSI Topology test using GCE PD driver [Feature:CSINodeInfo]", func() {
newConfig := config
newConfig.TopologyEnabled = true
driver := drivers.InitGcePDCSIDriver(newConfig).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
f := framework.NewDefaultFramework("csitopology")
driver := drivers.InitGcePDCSIDriver(true /* topology enabled */).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
var (
config *testsuites.PerTestConfig
testCleanup func()
)
BeforeEach(func() {
driver.CreateDriver()
config, testCleanup = driver.PrepareTest(f)
})
AfterEach(func() {
driver.CleanupDriver()
if testCleanup != nil {
testCleanup()
}
})
It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() {
suffix := "topology-positive"
testTopologyPositive(cs, suffix, ns.GetName(), false /* delayBinding */, true /* allowedTopologies */)
testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */, true /* allowedTopologies */)
})
It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() {
suffix := "delayed"
testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, false /* allowedTopologies */)
testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, false /* allowedTopologies */)
})
It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() {
suffix := "delayed-topology-positive"
testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, true /* allowedTopologies */)
testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, true /* allowedTopologies */)
})
It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() {
framework.SkipUnlessMultizone(cs)
framework.SkipUnlessMultizone(config.Framework.ClientSet)
suffix := "topology-negative"
testTopologyNegative(cs, suffix, ns.GetName(), false /* delayBinding */)
testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */)
})
It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() {
framework.SkipUnlessMultizone(cs)
framework.SkipUnlessMultizone(config.Framework.ClientSet)
suffix := "delayed-topology-negative"
testTopologyNegative(cs, suffix, ns.GetName(), true /* delayBinding */)
testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */)
})
})
@@ -227,29 +163,30 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
for _, t := range tests {
test := t
It(test.name, func() {
By("Deploying mock CSI driver")
config := testsuites.TestConfig{
Framework: f,
Prefix: "csi-attach",
}
f := framework.NewDefaultFramework("csiattach")
driver = drivers.InitMockCSIDriver(config, test.deployDriverCRD, test.driverAttachable, nil)
driver.CreateDriver()
defer driver.CleanupDriver()
It(test.name, func() {
cs := f.ClientSet
csics := f.CSIClientSet
ns := f.Namespace
driver = drivers.InitMockCSIDriver(test.deployDriverCRD, test.driverAttachable, nil)
config, testCleanup := driver.PrepareTest(f)
driverName := config.GetUniqueDriverName()
defer testCleanup()
if test.deployDriverCRD {
err = waitForCSIDriver(csics, driver)
err = waitForCSIDriver(csics, driverName)
framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err)
defer destroyCSIDriver(csics, driver)
defer destroyCSIDriver(csics, driverName)
}
By("Creating pod")
var sc *storagev1.StorageClass
if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass("")
sc = dDriver.GetDynamicProvisionStorageClass(config, "")
}
nodeName := driver.GetDriverInfo().Config.ClientNodeName
nodeName := config.ClientNodeName
scTest := testsuites.StorageClassTest{
Name: driver.GetDriverInfo().Name,
Provisioner: sc.Provisioner,
@@ -347,29 +284,30 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
}
for _, t := range tests {
test := t
It(test.name, func() {
By("Deploying mock CSI driver")
config := testsuites.TestConfig{
Framework: f,
Prefix: "csi-workload",
}
f := framework.NewDefaultFramework("csiworkload")
driver = drivers.InitMockCSIDriver(config, test.deployDriverCRD, true, test.podInfoOnMountVersion)
driver.CreateDriver()
defer driver.CleanupDriver()
It(test.name, func() {
cs := f.ClientSet
csics := f.CSIClientSet
ns := f.Namespace
driver = drivers.InitMockCSIDriver(test.deployDriverCRD, true, test.podInfoOnMountVersion)
config, testCleanup := driver.PrepareTest(f)
driverName := config.GetUniqueDriverName()
defer testCleanup()
if test.deployDriverCRD {
err = waitForCSIDriver(csics, driver)
err = waitForCSIDriver(csics, driverName)
framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err)
defer destroyCSIDriver(csics, driver)
defer destroyCSIDriver(csics, driverName)
}
By("Creating pod")
var sc *storagev1.StorageClass
if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass("")
sc = dDriver.GetDynamicProvisionStorageClass(config, "")
}
nodeName := driver.GetDriverInfo().Config.ClientNodeName
nodeName := config.ClientNodeName
scTest := testsuites.StorageClassTest{
Name: driver.GetDriverInfo().Name,
Parameters: sc.Parameters,
@@ -420,14 +358,16 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, dela
topoZone := getRandomClusterZone(cs)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, topoZone)
}
claim := newClaim(test, namespace, suffix)
claim.Spec.StorageClassName = &class.Name
test.Client = cs
test.Claim = newClaim(test, namespace, suffix)
test.Claim.Spec.StorageClassName = &class.Name
test.Class = class
if delayBinding {
_, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nil /* node selector */, false /* expect unschedulable */)
_, node := test.TestBindingWaitForFirstConsumer(nil /* node selector */, false /* expect unschedulable */)
Expect(node).ToNot(BeNil(), "Unexpected nil node found")
} else {
testsuites.TestDynamicProvisioning(test, cs, claim, class)
test.TestDynamicProvisioning()
}
}
@@ -447,12 +387,13 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
test.DelayBinding = delayBinding
nodeSelector := map[string]string{v1.LabelZoneFailureDomain: podZone}
class := newStorageClass(test, namespace, suffix)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, pvZone)
claim := newClaim(test, namespace, suffix)
claim.Spec.StorageClassName = &class.Name
test.Client = cs
test.Class = newStorageClass(test, namespace, suffix)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, test.Class, pvZone)
test.Claim = newClaim(test, namespace, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
if delayBinding {
testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nodeSelector, true /* expect unschedulable */)
test.TestBindingWaitForFirstConsumer(nodeSelector, true /* expect unschedulable */)
} else {
test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
// Ensure that a pod cannot be scheduled in an unsuitable zone.
@@ -461,13 +402,12 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
defer testsuites.StopPod(cs, pod)
framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
}
testsuites.TestDynamicProvisioning(test, cs, claim, class)
test.TestDynamicProvisioning()
}
}
func waitForCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) error {
func waitForCSIDriver(csics csiclient.Interface, driverName string) error {
timeout := 2 * time.Minute
driverName := testsuites.GetUniqueDriverName(driver)
framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
@@ -479,8 +419,7 @@ func waitForCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) e
return fmt.Errorf("gave up after waiting %v for CSIDriver %q.", timeout, driverName)
}
func destroyCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) {
driverName := testsuites.GetUniqueDriverName(driver)
func destroyCSIDriver(csics csiclient.Interface, driverName string) {
driverGet, err := csics.CsiV1alpha1().CSIDrivers().Get(driverName, metav1.GetOptions{})
if err == nil {
framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)

View File

@@ -56,12 +56,11 @@ const (
// hostpathCSI
type hostpathCSIDriver struct {
cleanup func()
driverInfo testsuites.DriverInfo
manifests []string
}
func initHostPathCSIDriver(name string, config testsuites.TestConfig, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
return &hostpathCSIDriver{
driverInfo: testsuites.DriverInfo{
Name: name,
@@ -71,7 +70,6 @@ func initHostPathCSIDriver(name string, config testsuites.TestConfig, capabiliti
"", // Default fsType
),
Capabilities: capabilities,
Config: config,
},
manifests: manifests,
}
@@ -82,8 +80,8 @@ var _ testsuites.DynamicPVTestDriver = &hostpathCSIDriver{}
var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
// InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
func InitHostPathCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath", config,
func InitHostPathCSIDriver() testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath",
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true, testsuites.CapMultiPODs: true},
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
@@ -104,19 +102,19 @@ func (h *hostpathCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
provisioner := testsuites.GetUniqueDriverName(h)
func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := config.GetUniqueDriverName()
parameters := map[string]string{}
ns := h.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", provisioner)
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
}
func (h *hostpathCSIDriver) GetSnapshotClass() *unstructured.Unstructured {
snapshotter := testsuites.GetUniqueDriverName(h)
func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
snapshotter := config.GetUniqueDriverName()
parameters := map[string]string{}
ns := h.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-vsc", snapshotter)
return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
@@ -126,57 +124,60 @@ func (h *hostpathCSIDriver) GetClaimSize() string {
return "5Gi"
}
func (h *hostpathCSIDriver) CreateDriver() {
func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name))
f := h.driverInfo.Config.Framework
cancelLogging := testsuites.StartPodLogs(f)
cs := f.ClientSet
// The hostpath CSI driver only works when everything runs on the same node.
nodes := framework.GetReadySchedulableNodesOrDie(cs)
nodeName := nodes.Items[rand.Intn(len(nodes.Items))].Name
h.driverInfo.Config.ClientNodeName = nodeName
config := &testsuites.PerTestConfig{
Driver: h,
Prefix: "hostpath",
Framework: f,
ClientNodeName: nodeName,
}
// TODO (?): the storage.csi.image.version and storage.csi.image.registry
// settings are ignored for this test. We could patch the image definitions.
o := utils.PatchCSIOptions{
OldDriverName: h.driverInfo.Name,
NewDriverName: testsuites.GetUniqueDriverName(h),
NewDriverName: config.GetUniqueDriverName(),
DriverContainerName: "hostpath",
DriverContainerArguments: []string{"--drivername=csi-hostpath-" + f.UniqueName},
DriverContainerArguments: []string{"--drivername=" + config.GetUniqueDriverName()},
ProvisionerContainerName: "csi-provisioner",
SnapshotterContainerName: "csi-snapshotter",
NodeName: nodeName,
}
cleanup, err := h.driverInfo.Config.Framework.CreateFromManifests(func(item interface{}) error {
return utils.PatchCSIDeployment(h.driverInfo.Config.Framework, o, item)
cleanup, err := config.Framework.CreateFromManifests(func(item interface{}) error {
return utils.PatchCSIDeployment(config.Framework, o, item)
},
h.manifests...)
h.cleanup = cleanup
if err != nil {
framework.Failf("deploying %s driver: %v", h.driverInfo.Name, err)
}
}
func (h *hostpathCSIDriver) CleanupDriver() {
if h.cleanup != nil {
return config, func() {
By(fmt.Sprintf("uninstalling %s driver", h.driverInfo.Name))
h.cleanup()
cleanup()
cancelLogging()
}
}
// mockCSI
type mockCSIDriver struct {
cleanup func()
driverInfo testsuites.DriverInfo
manifests []string
podInfoVersion *string
attachable bool
}
var _ testsuites.TestDriver = &mockCSIDriver{}
var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
// InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver {
func InitMockCSIDriver(registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver {
driverManifests := []string{
"test/e2e/testing-manifests/storage-csi/cluster-driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
@@ -187,16 +188,12 @@ func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttac
"test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml",
}
config.ServerConfig = &framework.VolumeTestConfig{}
if registerDriver {
driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-cluster-driver-registrar.yaml")
}
if driverAttachable {
driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml")
} else {
config.ServerConfig.ServerArgs = append(config.ServerConfig.ServerArgs, "--disable-attach")
}
return &mockCSIDriver{
@@ -212,10 +209,10 @@ func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttac
testsuites.CapFsGroup: false,
testsuites.CapExec: false,
},
Config: config,
},
manifests: driverManifests,
podInfoVersion: podInfoVersion,
attachable: driverAttachable,
}
}
@@ -226,10 +223,10 @@ func (m *mockCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (m *mockCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (m *mockCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
provisioner := testsuites.GetUniqueDriverName(m)
func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := config.GetUniqueDriverName()
parameters := map[string]string{}
ns := m.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", provisioner)
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -239,20 +236,24 @@ func (m *mockCSIDriver) GetClaimSize() string {
return "5Gi"
}
func (m *mockCSIDriver) CreateDriver() {
func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
By("deploying csi mock driver")
f := m.driverInfo.Config.Framework
cancelLogging := testsuites.StartPodLogs(f)
cs := f.ClientSet
// pods should be scheduled on the node
nodes := framework.GetReadySchedulableNodesOrDie(cs)
node := nodes.Items[rand.Intn(len(nodes.Items))]
m.driverInfo.Config.ClientNodeName = node.Name
config := &testsuites.PerTestConfig{
Driver: m,
Prefix: "mock",
Framework: f,
ClientNodeName: node.Name,
}
containerArgs := []string{"--name=csi-mock-" + f.UniqueName}
if m.driverInfo.Config.ServerConfig != nil && m.driverInfo.Config.ServerConfig.ServerArgs != nil {
containerArgs = append(containerArgs, m.driverInfo.Config.ServerConfig.ServerArgs...)
if !m.attachable {
containerArgs = append(containerArgs, "--disable-attach")
}
// TODO (?): the storage.csi.image.version and storage.csi.image.registry
@@ -264,29 +265,27 @@ func (m *mockCSIDriver) CreateDriver() {
DriverContainerArguments: containerArgs,
ProvisionerContainerName: "csi-provisioner",
ClusterRegistrarContainerName: "csi-cluster-driver-registrar",
NodeName: m.driverInfo.Config.ClientNodeName,
NodeName: config.ClientNodeName,
PodInfoVersion: m.podInfoVersion,
}
cleanup, err := f.CreateFromManifests(func(item interface{}) error {
return utils.PatchCSIDeployment(f, o, item)
},
m.manifests...)
m.cleanup = cleanup
if err != nil {
framework.Failf("deploying csi mock driver: %v", err)
}
}
func (m *mockCSIDriver) CleanupDriver() {
if m.cleanup != nil {
return config, func() {
By("uninstalling csi mock driver")
m.cleanup()
cleanup()
cancelLogging()
}
}
// InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests.
func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath-v0", config,
func InitHostPathV0CSIDriver() testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath-v0",
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
@@ -300,7 +299,7 @@ func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver
// gce-pd
type gcePDCSIDriver struct {
cleanup func()
topologyEnabled bool
driverInfo testsuites.DriverInfo
}
@@ -308,8 +307,9 @@ var _ testsuites.TestDriver = &gcePDCSIDriver{}
var _ testsuites.DynamicPVTestDriver = &gcePDCSIDriver{}
// InitGcePDCSIDriver returns gcePDCSIDriver that implements TestDriver interface
func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitGcePDCSIDriver(topologyEnabled bool) testsuites.TestDriver {
return &gcePDCSIDriver{
topologyEnabled: topologyEnabled,
driverInfo: testsuites.DriverInfo{
Name: GCEPDCSIProvisionerName,
FeatureTag: "[Serial]",
@@ -327,8 +327,6 @@ func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapExec: true,
testsuites.CapMultiPODs: true,
},
Config: config,
},
}
}
@@ -338,21 +336,14 @@ func (g *gcePDCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
}
func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
f := g.driverInfo.Config.Framework
framework.SkipUnlessProviderIs("gce", "gke")
if !g.driverInfo.Config.TopologyEnabled {
// Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be
// scheduled in a different zone from the provisioned volume, causing basic provisioning
// tests to fail.
framework.SkipIfMultizone(f.ClientSet)
}
if pattern.FsType == "xfs" {
framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
}
}
func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
ns := g.driverInfo.Config.Framework.Namespace.Name
func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
ns := config.Framework.Namespace.Name
provisioner := g.driverInfo.Name
suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
@@ -368,8 +359,16 @@ func (g *gcePDCSIDriver) GetClaimSize() string {
return "5Gi"
}
func (g *gcePDCSIDriver) CreateDriver() {
func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
if !g.topologyEnabled {
// Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be
// scheduled in a different zone from the provisioned volume, causing basic provisioning
// tests to fail.
framework.SkipIfMultizone(f.ClientSet)
}
By("deploying csi gce-pd driver")
cancelLogging := testsuites.StartPodLogs(f)
// It would be safer to rename the gcePD driver, but that
// hasn't been done before either and attempts to do so now led to
// errors during driver registration, therefore it is disabled
@@ -382,7 +381,7 @@ func (g *gcePDCSIDriver) CreateDriver() {
// DriverContainerName: "gce-driver",
// ProvisionerContainerName: "csi-external-provisioner",
// }
createGCESecrets(g.driverInfo.Config.Framework.ClientSet, g.driverInfo.Config.Framework.Namespace.Name)
createGCESecrets(f.ClientSet, f.Namespace.Name)
manifests := []string{
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
@@ -392,22 +391,24 @@ func (g *gcePDCSIDriver) CreateDriver() {
"test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml",
}
if g.driverInfo.Config.TopologyEnabled {
if g.topologyEnabled {
manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss_alpha.yaml")
} else {
manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml")
}
cleanup, err := g.driverInfo.Config.Framework.CreateFromManifests(nil, manifests...)
g.cleanup = cleanup
cleanup, err := f.CreateFromManifests(nil, manifests...)
if err != nil {
framework.Failf("deploying csi gce-pd driver: %v", err)
}
}
func (g *gcePDCSIDriver) CleanupDriver() {
return &testsuites.PerTestConfig{
Driver: g,
Prefix: "gcepd",
Framework: f,
}, func() {
By("uninstalling gce-pd driver")
if g.cleanup != nil {
g.cleanup()
cleanup()
cancelLogging()
}
}
@@ -420,7 +421,7 @@ var _ testsuites.TestDriver = &gcePDExternalCSIDriver{}
var _ testsuites.DynamicPVTestDriver = &gcePDExternalCSIDriver{}
// InitGcePDExternalCSIDriver returns gcePDExternalCSIDriver that implements TestDriver interface
func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitGcePDExternalCSIDriver() testsuites.TestDriver {
return &gcePDExternalCSIDriver{
driverInfo: testsuites.DriverInfo{
Name: GCEPDCSIProvisionerName,
@@ -440,8 +441,6 @@ func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDri
testsuites.CapExec: true,
testsuites.CapMultiPODs: true,
},
Config: config,
},
}
}
@@ -452,14 +451,13 @@ func (g *gcePDExternalCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (g *gcePDExternalCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipIfMultizone(g.driverInfo.Config.Framework.ClientSet)
if pattern.FsType == "xfs" {
framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
}
}
func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
ns := g.driverInfo.Config.Framework.Namespace.Name
func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
ns := config.Framework.Namespace.Name
provisioner := g.driverInfo.Name
suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
@@ -475,8 +473,12 @@ func (g *gcePDExternalCSIDriver) GetClaimSize() string {
return "5Gi"
}
func (g *gcePDExternalCSIDriver) CreateDriver() {
}
func (g *gcePDExternalCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
framework.SkipIfMultizone(f.ClientSet)
func (g *gcePDExternalCSIDriver) CleanupDriver() {
return &testsuites.PerTestConfig{
Driver: g,
Prefix: "gcepdext",
Framework: f,
}, func() {}
}

View File

@@ -82,7 +82,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &nfsDriver{}
var _ testsuites.DynamicPVTestDriver = &nfsDriver{}
// InitNFSDriver returns nfsDriver that implements TestDriver interface
func InitNFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitNFSDriver() testsuites.TestDriver {
return &nfsDriver{
driverInfo: testsuites.DriverInfo{
Name: "nfs",
@@ -96,8 +96,6 @@ func InitNFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapPersistence: true,
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -133,10 +131,10 @@ func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volu
}, nil
}
func (n *nfsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
func (n *nfsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := n.externalPluginName
parameters := map[string]string{"mountOptions": "vers=4.1"}
ns := n.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", n.driverInfo.Name)
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -146,8 +144,7 @@ func (n *nfsDriver) GetClaimSize() string {
return "5Gi"
}
func (n *nfsDriver) CreateDriver() {
f := n.driverInfo.Config.Framework
func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
cs := f.ClientSet
ns := f.Namespace
n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name)
@@ -164,32 +161,32 @@ func (n *nfsDriver) CreateDriver() {
By("creating an external dynamic provisioner pod")
n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)
}
func (n *nfsDriver) CleanupDriver() {
f := n.driverInfo.Config.Framework
cs := f.ClientSet
ns := f.Namespace
return &testsuites.PerTestConfig{
Driver: n,
Prefix: "nfs",
Framework: f,
}, func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, n.externalProvisionerPod))
clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
cs.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0))
}
}
func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
f := n.driverInfo.Config.Framework
func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
// NewNFSServer creates a pod for InlineVolume and PreprovisionedPV,
// and startExternalProvisioner creates a pods for DynamicPV.
// Therefore, we need a different CreateDriver logic for volType.
// Therefore, we need a different PrepareTest logic for volType.
switch volType {
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
config, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{})
n.driverInfo.Config.ServerConfig = &config
c, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{})
config.ServerConfig = &c
return &nfsVolume{
serverIP: serverIP,
serverPod: serverPod,
@@ -224,7 +221,7 @@ var _ testsuites.InlineVolumeTestDriver = &glusterFSDriver{}
var _ testsuites.PreprovisionedPVTestDriver = &glusterFSDriver{}
// InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface
func InitGlusterFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitGlusterFSDriver() testsuites.TestDriver {
return &glusterFSDriver{
driverInfo: testsuites.DriverInfo{
Name: "gluster",
@@ -236,8 +233,6 @@ func InitGlusterFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapPersistence: true,
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -280,19 +275,21 @@ func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string
}, nil
}
func (g *glusterFSDriver) CreateDriver() {
func (g *glusterFSDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: g,
Prefix: "gluster",
Framework: f,
}, func() {}
}
func (g *glusterFSDriver) CleanupDriver() {
}
func (g *glusterFSDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
f := g.driverInfo.Config.Framework
func (g *glusterFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
config, serverPod, _ := framework.NewGlusterfsServer(cs, ns.Name)
g.driverInfo.Config.ServerConfig = &config
c, serverPod, _ := framework.NewGlusterfsServer(cs, ns.Name)
config.ServerConfig = &c
return &glusterVolume{
prefix: config.Prefix,
serverPod: serverPod,
@@ -339,7 +336,7 @@ var _ testsuites.InlineVolumeTestDriver = &iSCSIDriver{}
var _ testsuites.PreprovisionedPVTestDriver = &iSCSIDriver{}
// InitISCSIDriver returns iSCSIDriver that implements TestDriver interface
func InitISCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitISCSIDriver() testsuites.TestDriver {
return &iSCSIDriver{
driverInfo: testsuites.DriverInfo{
Name: "iscsi",
@@ -358,8 +355,6 @@ func InitISCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapBlock: true,
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -408,19 +403,21 @@ func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, vo
return &pvSource, nil
}
func (i *iSCSIDriver) CreateDriver() {
func (i *iSCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: i,
Prefix: "iscsi",
Framework: f,
}, func() {}
}
func (i *iSCSIDriver) CleanupDriver() {
}
func (i *iSCSIDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
f := i.driverInfo.Config.Framework
func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
config, serverPod, serverIP := framework.NewISCSIServer(cs, ns.Name)
i.driverInfo.Config.ServerConfig = &config
c, serverPod, serverIP := framework.NewISCSIServer(cs, ns.Name)
config.ServerConfig = &c
return &iSCSIVolume{
serverPod: serverPod,
serverIP: serverIP,
@@ -450,7 +447,7 @@ var _ testsuites.InlineVolumeTestDriver = &rbdDriver{}
var _ testsuites.PreprovisionedPVTestDriver = &rbdDriver{}
// InitRbdDriver returns rbdDriver that implements TestDriver interface
func InitRbdDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitRbdDriver() testsuites.TestDriver {
return &rbdDriver{
driverInfo: testsuites.DriverInfo{
Name: "rbd",
@@ -469,8 +466,6 @@ func InitRbdDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapBlock: true,
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -505,12 +500,12 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
}
func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
f := r.driverInfo.Config.Framework
ns := f.Namespace
rv, ok := volume.(*rbdVolume)
Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume")
f := rv.f
ns := f.Namespace
pvSource := v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{rv.serverIP},
@@ -530,19 +525,21 @@ func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volu
return &pvSource, nil
}
func (r *rbdDriver) CreateDriver() {
func (r *rbdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: r,
Prefix: "rbd",
Framework: f,
}, func() {}
}
func (r *rbdDriver) CleanupDriver() {
}
func (r *rbdDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
f := r.driverInfo.Config.Framework
func (r *rbdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
config, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name)
r.driverInfo.Config.ServerConfig = &config
c, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name)
config.ServerConfig = &c
return &rbdVolume{
serverPod: serverPod,
serverIP: serverIP,
@@ -577,7 +574,7 @@ var _ testsuites.InlineVolumeTestDriver = &cephFSDriver{}
var _ testsuites.PreprovisionedPVTestDriver = &cephFSDriver{}
// InitCephFSDriver returns cephFSDriver that implements TestDriver interface
func InitCephFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitCephFSDriver() testsuites.TestDriver {
return &cephFSDriver{
driverInfo: testsuites.DriverInfo{
Name: "ceph",
@@ -590,8 +587,6 @@ func InitCephFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapPersistence: true,
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -620,12 +615,11 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume test
}
func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
f := c.driverInfo.Config.Framework
ns := f.Namespace
cv, ok := volume.(*cephVolume)
Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume")
ns := cv.f.Namespace
return &v1.PersistentVolumeSource{
CephFS: &v1.CephFSPersistentVolumeSource{
Monitors: []string{cv.serverIP + ":6789"},
@@ -639,19 +633,21 @@ func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, v
}, nil
}
func (c *cephFSDriver) CreateDriver() {
func (c *cephFSDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: c,
Prefix: "cephfs",
Framework: f,
}, func() {}
}
func (c *cephFSDriver) CleanupDriver() {
}
func (c *cephFSDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
f := c.driverInfo.Config.Framework
func (c *cephFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
config, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name)
c.driverInfo.Config.ServerConfig = &config
cfg, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name)
config.ServerConfig = &cfg
return &cephVolume{
serverPod: serverPod,
serverIP: serverIP,
@@ -676,7 +672,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &hostPathDriver{}
var _ testsuites.InlineVolumeTestDriver = &hostPathDriver{}
// InitHostPathDriver returns hostPathDriver that implements TestDriver interface
func InitHostPathDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitHostPathDriver() testsuites.TestDriver {
return &hostPathDriver{
driverInfo: testsuites.DriverInfo{
Name: "hostPath",
@@ -687,8 +683,6 @@ func InitHostPathDriver(config testsuites.TestConfig) testsuites.TestDriver {
Capabilities: map[testsuites.Capability]bool{
testsuites.CapPersistence: true,
},
Config: config,
},
}
}
@@ -712,20 +706,22 @@ func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, volume te
}
}
func (h *hostPathDriver) CreateDriver() {
func (h *hostPathDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: h,
Prefix: "hostpath",
Framework: f,
}, func() {}
}
func (h *hostPathDriver) CleanupDriver() {
}
func (h *hostPathDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
f := h.driverInfo.Config.Framework
func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
f := config.Framework
cs := f.ClientSet
// pods should be scheduled on the node
nodes := framework.GetReadySchedulableNodesOrDie(cs)
node := nodes.Items[rand.Intn(len(nodes.Items))]
h.driverInfo.Config.ClientNodeName = node.Name
config.ClientNodeName = node.Name
return nil
}
@@ -748,7 +744,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &hostPathSymlinkDriver{}
var _ testsuites.InlineVolumeTestDriver = &hostPathSymlinkDriver{}
// InitHostPathSymlinkDriver returns hostPathSymlinkDriver that implements TestDriver interface
func InitHostPathSymlinkDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitHostPathSymlinkDriver() testsuites.TestDriver {
return &hostPathSymlinkDriver{
driverInfo: testsuites.DriverInfo{
Name: "hostPathSymlink",
@@ -759,8 +755,6 @@ func InitHostPathSymlinkDriver(config testsuites.TestConfig) testsuites.TestDriv
Capabilities: map[testsuites.Capability]bool{
testsuites.CapPersistence: true,
},
Config: config,
},
}
}
@@ -787,14 +781,16 @@ func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, vo
}
}
func (h *hostPathSymlinkDriver) CreateDriver() {
func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: h,
Prefix: "hostpathsymlink",
Framework: f,
}, func() {}
}
func (h *hostPathSymlinkDriver) CleanupDriver() {
}
func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
f := h.driverInfo.Config.Framework
func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
f := config.Framework
cs := f.ClientSet
sourcePath := fmt.Sprintf("/tmp/%v", f.Namespace.Name)
@@ -804,7 +800,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) t
// pods should be scheduled on the node
nodes := framework.GetReadySchedulableNodesOrDie(cs)
node := nodes.Items[rand.Intn(len(nodes.Items))]
h.driverInfo.Config.ClientNodeName = node.Name
config.ClientNodeName = node.Name
cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath)
privileged := true
@@ -888,7 +884,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &emptydirDriver{}
var _ testsuites.InlineVolumeTestDriver = &emptydirDriver{}
// InitEmptydirDriver returns emptydirDriver that implements TestDriver interface
func InitEmptydirDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitEmptydirDriver() testsuites.TestDriver {
return &emptydirDriver{
driverInfo: testsuites.DriverInfo{
Name: "emptydir",
@@ -899,8 +895,6 @@ func InitEmptydirDriver(config testsuites.TestConfig) testsuites.TestDriver {
Capabilities: map[testsuites.Capability]bool{
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -922,14 +916,16 @@ func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, volume te
}
}
func (e *emptydirDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
func (e *emptydirDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
return nil
}
func (e *emptydirDriver) CreateDriver() {
}
func (e *emptydirDriver) CleanupDriver() {
func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: e,
Prefix: "emptydir",
Framework: f,
}, func() {}
}
// Cinder
@@ -953,7 +949,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{}
var _ testsuites.DynamicPVTestDriver = &cinderDriver{}
// InitCinderDriver returns cinderDriver that implements TestDriver interface
func InitCinderDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitCinderDriver() testsuites.TestDriver {
return &cinderDriver{
driverInfo: testsuites.DriverInfo{
Name: "cinder",
@@ -967,8 +963,6 @@ func InitCinderDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapFsGroup: true,
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -1013,13 +1007,13 @@ func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, v
return &pvSource, nil
}
func (c *cinderDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/cinder"
parameters := map[string]string{}
if fsType != "" {
parameters["fsType"] = fsType
}
ns := c.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name)
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1029,14 +1023,16 @@ func (c *cinderDriver) GetClaimSize() string {
return "5Gi"
}
func (c *cinderDriver) CreateDriver() {
func (c *cinderDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: c,
Prefix: "cinder",
Framework: f,
}, func() {}
}
func (c *cinderDriver) CleanupDriver() {
}
func (c *cinderDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
f := c.driverInfo.Config.Framework
func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
f := config.Framework
ns := f.Namespace
// We assume that namespace.Name is a random string
@@ -1109,7 +1105,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{}
var _ testsuites.DynamicPVTestDriver = &gcePdDriver{}
// InitGceDriver returns gcePdDriver that implements TestDriver interface
func InitGcePdDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitGcePdDriver() testsuites.TestDriver {
return &gcePdDriver{
driverInfo: testsuites.DriverInfo{
Name: "gcepd",
@@ -1128,8 +1124,6 @@ func InitGcePdDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapBlock: true,
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -1172,13 +1166,13 @@ func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, vo
return &pvSource, nil
}
func (g *gcePdDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/gce-pd"
parameters := map[string]string{}
if fsType != "" {
parameters["fsType"] = fsType
}
ns := g.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1188,17 +1182,19 @@ func (h *gcePdDriver) GetClaimSize() string {
return "5Gi"
}
func (g *gcePdDriver) CreateDriver() {
func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: g,
Prefix: "gcepd",
Framework: f,
}, func() {}
}
func (g *gcePdDriver) CleanupDriver() {
}
func (g *gcePdDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
if volType == testpatterns.InlineVolume {
// PD will be created in framework.TestContext.CloudConfig.Zone zone,
// so pods should be also scheduled there.
g.driverInfo.Config.ClientNodeSelector = map[string]string{
config.ClientNodeSelector = map[string]string{
v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
}
}
@@ -1231,7 +1227,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{}
var _ testsuites.DynamicPVTestDriver = &vSphereDriver{}
// InitVSphereDriver returns vSphereDriver that implements TestDriver interface
func InitVSphereDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitVSphereDriver() testsuites.TestDriver {
return &vSphereDriver{
driverInfo: testsuites.DriverInfo{
Name: "vSphere",
@@ -1245,8 +1241,6 @@ func InitVSphereDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapFsGroup: true,
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -1298,13 +1292,13 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string,
return &pvSource, nil
}
func (v *vSphereDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/vsphere-volume"
parameters := map[string]string{}
if fsType != "" {
parameters["fsType"] = fsType
}
ns := v.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name)
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1314,14 +1308,16 @@ func (v *vSphereDriver) GetClaimSize() string {
return "5Gi"
}
func (v *vSphereDriver) CreateDriver() {
func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: v,
Prefix: "vsphere",
Framework: f,
}, func() {}
}
func (v *vSphereDriver) CleanupDriver() {
}
func (v *vSphereDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
f := v.driverInfo.Config.Framework
func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
f := config.Framework
vspheretest.Bootstrap(f)
nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
@@ -1352,7 +1348,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &azureDriver{}
var _ testsuites.DynamicPVTestDriver = &azureDriver{}
// InitAzureDriver returns azureDriver that implements TestDriver interface
func InitAzureDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitAzureDriver() testsuites.TestDriver {
return &azureDriver{
driverInfo: testsuites.DriverInfo{
Name: "azure",
@@ -1367,8 +1363,6 @@ func InitAzureDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapBlock: true,
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -1419,13 +1413,13 @@ func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, vo
return &pvSource, nil
}
func (a *azureDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
func (a *azureDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/azure-disk"
parameters := map[string]string{}
if fsType != "" {
parameters["fsType"] = fsType
}
ns := a.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1435,13 +1429,15 @@ func (a *azureDriver) GetClaimSize() string {
return "5Gi"
}
func (a *azureDriver) CreateDriver() {
func (a *azureDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: a,
Prefix: "azure",
Framework: f,
}, func() {}
}
func (a *azureDriver) CleanupDriver() {
}
func (a *azureDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
func (a *azureDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
By("creating a test azure disk volume")
volumeName, err := framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
@@ -1470,7 +1466,7 @@ var _ testsuites.TestDriver = &awsDriver{}
var _ testsuites.DynamicPVTestDriver = &awsDriver{}
// InitAwsDriver returns awsDriver that implements TestDriver interface
func InitAwsDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitAwsDriver() testsuites.TestDriver {
return &awsDriver{
driverInfo: testsuites.DriverInfo{
Name: "aws",
@@ -1486,8 +1482,6 @@ func InitAwsDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapBlock: true,
testsuites.CapExec: true,
},
Config: config,
},
}
}
@@ -1529,13 +1523,13 @@ func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volu
}
*/
func (a *awsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/aws-ebs"
parameters := map[string]string{}
if fsType != "" {
parameters["fsType"] = fsType
}
ns := a.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1545,15 +1539,17 @@ func (a *awsDriver) GetClaimSize() string {
return "5Gi"
}
func (a *awsDriver) CreateDriver() {
}
func (a *awsDriver) CleanupDriver() {
func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
return &testsuites.PerTestConfig{
Driver: a,
Prefix: "aws",
Framework: f,
}, func() {}
}
// TODO: Fix authorization error in attach operation and uncomment below
/*
func (a *awsDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
By("creating a test aws volume")
var err error
a.volumeName, err = framework.CreatePDWithRetry()
@@ -1617,7 +1613,7 @@ var _ testsuites.TestDriver = &localDriver{}
var _ testsuites.PreprovisionedVolumeTestDriver = &localDriver{}
var _ testsuites.PreprovisionedPVTestDriver = &localDriver{}
func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config testsuites.TestConfig) testsuites.TestDriver {
func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() testsuites.TestDriver {
maxFileSize := defaultLocalVolumeMaxFileSize
if maxFileSizeByVolType, ok := localVolumeMaxFileSizes[volumeType]; ok {
maxFileSize = maxFileSizeByVolType
@@ -1630,8 +1626,7 @@ func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config
if capabilitiesByType, ok := localVolumeCapabitilies[volumeType]; ok {
capabilities = capabilitiesByType
}
return func(config testsuites.TestConfig) testsuites.TestDriver {
hostExec := utils.NewHostExec(config.Framework)
return func() testsuites.TestDriver {
// custom tag to distinguish from tests of other volume types
featureTag := fmt.Sprintf("[LocalVolumeType: %s]", volumeType)
// For GCE Local SSD volumes, we must run serially
@@ -1645,11 +1640,8 @@ func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config
MaxFileSize: maxFileSize,
SupportedFsType: supportedFsTypes,
Capabilities: capabilities,
Config: config,
},
hostExec: hostExec,
volumeType: volumeType,
ltrMgr: utils.NewLocalResourceManager("local-driver", hostExec, "/tmp"),
}
}
}
@@ -1673,28 +1665,30 @@ func (l *localDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
}
func (l *localDriver) CreateDriver() {
func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
// choose a random node to test against
l.node = l.randomNode()
}
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
l.node = &nodes.Items[rand.Intn(len(nodes.Items))]
func (l *localDriver) CleanupDriver() {
l.hostExec = utils.NewHostExec(f)
l.ltrMgr = utils.NewLocalResourceManager("local-driver", l.hostExec, "/tmp")
return &testsuites.PerTestConfig{
Driver: l,
Prefix: "local",
Framework: f,
ClientNodeName: l.node.Name,
}, func() {
l.hostExec.Cleanup()
}
func (l *localDriver) randomNode() *v1.Node {
f := l.driverInfo.Config.Framework
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node := nodes.Items[rand.Intn(len(nodes.Items))]
return &node
}
func (l *localDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
func (l *localDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
switch volType {
case testpatterns.PreprovisionedPV:
node := l.node
// assign this to schedule the pod on this node
l.driverInfo.Config.ClientNodeName = node.Name
config.ClientNodeName = node.Name
return &localVolume{
ltrMgr: l.ltrMgr,
ltr: l.ltrMgr.Create(node, l.volumeType, nil),
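
The localVolume returned here must satisfy the testsuites.TestVolume interface so that generic suite code can delete it later. A plausible minimal shape, with the DeleteVolume body being an assumption (the real definition lives further down in this file):

type localVolume struct {
	ltrMgr utils.LocalTestResourceManager
	ltr    *utils.LocalTestResource
}

var _ testsuites.TestVolume = &localVolume{}

func (v *localVolume) DeleteVolume() {
	// Assumed: release the local test resource created in CreateVolume.
	v.ltrMgr.Remove(v.ltr)
}
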


@@ -18,7 +18,6 @@ package storage
import (
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -26,7 +25,7 @@ import (
)
// List of testDrivers to be executed in below loop
var testDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
var testDrivers = []func() testsuites.TestDriver{
drivers.InitNFSDriver,
drivers.InitGlusterFSDriver,
drivers.InitISCSIDriver,
@@ -65,35 +64,11 @@ func intreeTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestP
// This executes testSuites for in-tree volumes.
var _ = utils.SIGDescribe("In-tree Volumes", func() {
f := framework.NewDefaultFramework("volumes")
var (
// Common configuration options for all drivers.
config = testsuites.TestConfig{
Framework: f,
Prefix: "in-tree",
}
)
for _, initDriver := range testDrivers {
curDriver := initDriver(config)
curConfig := curDriver.GetDriverInfo().Config
curDriver := initDriver()
Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
BeforeEach(func() {
// Reset config. The driver might have modified its copy
// in a previous test.
curDriver.GetDriverInfo().Config = curConfig
// setupDriver
curDriver.CreateDriver()
})
AfterEach(func() {
// Cleanup driver
curDriver.CleanupDriver()
})
testsuites.RunTestSuite(f, curDriver, testSuites, intreeTunePattern)
testsuites.DefineTestSuite(curDriver, testSuites, intreeTunePattern)
})
}
})
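
The per-driver boilerplate thus collapses to a Context plus a single DefineTestSuite call. For orientation, a condensed sketch of how the suite list and the loop fit together (InitVolumesTestSuite and InitProvisioningTestSuite stand in for the full list defined earlier in this file):

var testSuites = []func() testsuites.TestSuite{
	testsuites.InitVolumesTestSuite,
	testsuites.InitProvisioningTestSuite,
}

for _, initDriver := range testDrivers {
	curDriver := initDriver()
	Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
		// Only defines tests; framework and driver setup happen lazily
		// inside each It, after the cheap skip checks.
		testsuites.DefineTestSuite(curDriver, testSuites, intreeTunePattern)
	})
}
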


@@ -143,10 +143,11 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
}
for _, test := range tests {
class := newStorageClass(test, ns, "" /* suffix */)
claim := newClaim(test, ns, "" /* suffix */)
claim.Spec.StorageClassName = &class.Name
testsuites.TestDynamicProvisioning(test, c, claim, class)
test.Client = c
test.Class = newStorageClass(test, ns, "" /* suffix */)
test.Claim = newClaim(test, ns, "" /* suffix */)
test.Claim.Spec.StorageClassName = &test.Class.Name
test.TestDynamicProvisioning()
}
}
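
The pattern is the same everywhere StorageClassTest is used: populate the struct once, then invoke the checks as methods. A condensed sketch with placeholder values:

test := testsuites.StorageClassTest{
	Client:       c,
	Name:         "example dynamic provisioning", // placeholder
	Provisioner:  "kubernetes.io/gce-pd",
	ClaimSize:    "1Gi",
	ExpectedSize: "1Gi",
}
test.Class = newStorageClass(test, ns, "" /* suffix */)
test.Claim = newClaim(test, ns, "" /* suffix */)
test.Claim.Spec.StorageClassName = &test.Class.Name
// The returned PV can be inspected further, e.g. for zone labels.
pv := test.TestDynamicProvisioning()
_ = pv
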
@@ -301,6 +302,7 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
test := testsuites.StorageClassTest{
Client: c,
Name: "Regional PD storage class with waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
@@ -312,14 +314,14 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
}
suffix := "delayed-regional"
class := newStorageClass(test, ns, suffix)
test.Class = newStorageClass(test, ns, suffix)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
claim.Spec.StorageClassName = &test.Class.Name
claims = append(claims, claim)
}
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
if node == nil {
framework.Failf("unexpected nil node found")
}
@@ -345,17 +347,20 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
}
suffix := "topo-regional"
class := newStorageClass(test, ns, suffix)
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
zones := getTwoRandomZones(c)
addAllowedTopologiesToStorageClass(c, class, zones)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
addAllowedTopologiesToStorageClass(c, test.Class, zones)
test.Claim = newClaim(test, ns, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
pv := test.TestDynamicProvisioning()
checkZonesFromLabelAndAffinity(pv, sets.NewString(zones...), true)
}
func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
test := testsuites.StorageClassTest{
Client: c,
Name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
@@ -367,16 +372,16 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
}
suffix := "topo-delayed-regional"
class := newStorageClass(test, ns, suffix)
test.Class = newStorageClass(test, ns, suffix)
topoZones := getTwoRandomZones(c)
addAllowedTopologiesToStorageClass(c, class, topoZones)
addAllowedTopologiesToStorageClass(c, test.Class, topoZones)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
claim.Spec.StorageClassName = &test.Class.Name
claims = append(claims, claim)
}
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
if node == nil {
framework.Failf("unexpected nil node found")
}


@@ -17,7 +17,9 @@ limitations under the License.
package testsuites
import (
"context"
"fmt"
"regexp"
"time"
. "github.com/onsi/ginkgo"
@@ -32,6 +34,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/podlogs"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@@ -39,10 +42,10 @@ import (
type TestSuite interface {
// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite
getTestSuiteInfo() TestSuiteInfo
// skipUnsupportedTest skips the test if this TestSuite is not suitable to be tested with the combination of TestPattern and TestDriver
skipUnsupportedTest(testpatterns.TestPattern, TestDriver)
// execTest executes test of the testpattern for the driver
execTest(TestDriver, testpatterns.TestPattern)
// defineTest defines tests of the testpattern for the driver.
// Called inside a Ginkgo context that reflects the current driver and test pattern,
// so the test suite can define tests directly with ginkgo.It.
defineTests(TestDriver, testpatterns.TestPattern)
}
// TestSuiteInfo represents a set of parameters for TestSuite
@@ -54,11 +57,8 @@ type TestSuiteInfo struct {
// TestResource represents an interface for resources that is used by TestSuite
type TestResource interface {
// setupResource sets up test resources to be used for the tests with the
// combination of TestDriver and TestPattern
setupResource(TestDriver, testpatterns.TestPattern)
// cleanupResource clean up the test resources created in SetupResource
cleanupResource(TestDriver, testpatterns.TestPattern)
// cleanupResource cleans up the test resources created when setting up the resource
cleanupResource()
}
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
@@ -66,27 +66,36 @@ func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag)
}
// RunTestSuite runs all testpatterns of all testSuites for a driver
func RunTestSuite(f *framework.Framework, driver TestDriver, tsInits []func() TestSuite, tunePatternFunc func([]testpatterns.TestPattern) []testpatterns.TestPattern) {
// DefineTestSuite defines tests for all testpatterns and all testSuites for a driver
func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite, tunePatternFunc func([]testpatterns.TestPattern) []testpatterns.TestPattern) {
for _, testSuiteInit := range tsInits {
suite := testSuiteInit()
patterns := tunePatternFunc(suite.getTestSuiteInfo().testPatterns)
for _, pattern := range patterns {
suite.execTest(driver, pattern)
p := pattern
Context(getTestNameStr(suite, p), func() {
BeforeEach(func() {
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(driver, p)
})
suite.defineTests(driver, p)
})
}
}
}
// skipUnsupportedTest will skip tests if the combination of driver, testsuite, and testpattern
// skipUnsupportedTest will skip tests if the combination of driver and testpattern
// is not suitable to be tested.
// Whether it needs to be skipped is checked by following steps:
// 1. Check whether SnapshotType is supported by driver from its interface
// 2. Check whether volType is supported by driver from its interface
// 3. Check if fsType is supported
// 4. Check with driver specific logic
// 5. Check with testSuite specific logic
func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpatterns.TestPattern) {
//
// Test suites can also skip tests inside their own defineTests function or in
// individual tests.
func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
var isSupported bool
@@ -130,9 +139,6 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern
// 4. Check with driver specific logic
driver.SkipUnsupportedTest(pattern)
// 5. Check with testSuite specific logic
suite.skipUnsupportedTest(pattern, driver)
}
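
Step 4 hands control to the driver, which can veto a pattern cheaply before any cluster resources exist. A minimal sketch of such a hook (the skip condition is illustrative only):

func (d *fooDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
	// Illustrative: a driver without raw block support skips block patterns.
	if pattern.VolMode == v1.PersistentVolumeBlock {
		framework.Skipf("Driver %q does not support block volume mode -- skipping", d.driverInfo.Name)
	}
}
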
// genericVolumeTestResource is a generic implementation of TestResource that will be able to
@@ -141,6 +147,8 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern
// Also, see subpath.go in the same directory for how to extend and use it.
type genericVolumeTestResource struct {
driver TestDriver
config *PerTestConfig
pattern testpatterns.TestPattern
volType string
volSource *v1.VolumeSource
pvc *v1.PersistentVolumeClaim
@@ -152,17 +160,20 @@ type genericVolumeTestResource struct {
var _ TestResource = &genericVolumeTestResource{}
// setupResource sets up genericVolumeTestResource
func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
r.driver = driver
func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern) *genericVolumeTestResource {
r := genericVolumeTestResource{
driver: driver,
config: config,
pattern: pattern,
}
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
f := config.Framework
cs := f.ClientSet
fsType := pattern.FsType
volType := pattern.VolType
// Create volume for pre-provisioned volume tests
r.volume = CreateVolume(driver, volType)
r.volume = CreateVolume(driver, config, volType)
switch volType {
case testpatterns.InlineVolume:
@@ -184,7 +195,7 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes
framework.Logf("Creating resource for dynamic PV")
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
claimSize := dDriver.GetClaimSize()
r.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, fsType)
By("creating a StorageClass " + r.sc.Name)
var err error
@@ -204,13 +215,14 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes
if r.volSource == nil {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, volType)
}
return &r
}
// cleanupResource cleans up genericVolumeTestResource
func (r *genericVolumeTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
volType := pattern.VolType
func (r *genericVolumeTestResource) cleanupResource() {
f := r.config.Framework
volType := r.pattern.VolType
if r.pvc != nil || r.pv != nil {
switch volType {
@@ -356,7 +368,7 @@ func deleteStorageClass(cs clientset.Interface, className string) {
// the testsuites package whereas framework.VolumeTestConfig is merely
// an implementation detail. It contains fields that have no effect,
// which makes it unsuitable for use in the testsuites public API.
func convertTestConfig(in *TestConfig) framework.VolumeTestConfig {
func convertTestConfig(in *PerTestConfig) framework.VolumeTestConfig {
if in.ServerConfig != nil {
return *in.ServerConfig
}
@@ -390,3 +402,42 @@ func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.U
return snapshot
}
// StartPodLogs begins capturing log output and events from current
// and future pods running in the namespace of the framework. That
// ends when the returned cleanup function is called.
//
// The output goes to log files (when using --report-dir, as in the
// CI) or the output stream (otherwise).
func StartPodLogs(f *framework.Framework) func() {
ctx, cancel := context.WithCancel(context.Background())
cs := f.ClientSet
ns := f.Namespace
to := podlogs.LogOutput{
StatusWriter: GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = GinkgoWriter
} else {
test := CurrentGinkgoTestDescription()
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
//
// TODO: use a deeper directory hierarchy once gubernator
// supports that (https://github.com/kubernetes/test-infra/issues/10289).
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
reg.ReplaceAllString(test.FullTestText, "_") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
}
return cancel
}
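
Callers treat the returned function like any other cleanup. A sketch of typical use inside a Ginkgo suite (placement is illustrative):

var cancelPodLogs func()

BeforeEach(func() {
	// Capture logs and events for all pods in the test namespace.
	cancelPodLogs = testsuites.StartPodLogs(f)
})

AfterEach(func() {
	cancelPodLogs()
})
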


@@ -37,13 +37,13 @@ func GetDriverNameWithFeatureTags(driver TestDriver) string {
}
// CreateVolume creates volume for test unless dynamicPV test
func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) TestVolume {
func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns.TestVolType) TestVolume {
switch volType {
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
return pDriver.CreateVolume(volType)
return pDriver.CreateVolume(config, volType)
}
case testpatterns.DynamicPV:
// No need to create volume
@@ -103,8 +103,3 @@ func GetSnapshotClass(
return snapshotClass
}
// GetUniqueDriverName returns unique driver name that can be used parallelly in tests
func GetUniqueDriverName(driver TestDriver) string {
return fmt.Sprintf("%s-%s", driver.GetDriverInfo().Name, driver.GetDriverInfo().Config.Framework.UniqueName)
}


@@ -41,6 +41,9 @@ import (
// StorageClassTest represents parameters to be used by provisioning tests.
// Not all parameters are used by all tests.
type StorageClassTest struct {
Client clientset.Interface
Claim *v1.PersistentVolumeClaim
Class *storage.StorageClass
Name string
CloudProviders []string
Provisioner string
@@ -76,183 +79,156 @@ func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo {
return p.tsInfo
}
func (p *provisioningTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
}
func createProvisioningTestInput(driver TestDriver, pattern testpatterns.TestPattern) (provisioningTestResource, provisioningTestInput) {
// Setup test resource for driver and testpattern
resource := provisioningTestResource{}
resource.setupResource(driver, pattern)
input := provisioningTestInput{
testCase: StorageClassTest{
ClaimSize: resource.claimSize,
ExpectedSize: resource.claimSize,
},
cs: driver.GetDriverInfo().Config.Framework.ClientSet,
dc: driver.GetDriverInfo().Config.Framework.DynamicClient,
pvc: resource.pvc,
sc: resource.sc,
vsc: resource.vsc,
dInfo: driver.GetDriverInfo(),
nodeName: driver.GetDriverInfo().Config.ClientNodeName,
}
return resource, input
}
func (p *provisioningTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(p, pattern), func() {
func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
resource provisioningTestResource
input provisioningTestInput
needsCleanup bool
dInfo = driver.GetDriverInfo()
dDriver DynamicPVTestDriver
config *PerTestConfig
testCleanup func()
testCase *StorageClassTest
cs clientset.Interface
pvc *v1.PersistentVolumeClaim
sc *storage.StorageClass
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(p, driver, pattern)
needsCleanup = true
// Create test input
resource, input = createProvisioningTestInput(driver, pattern)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
// Check preconditions.
if pattern.VolType != testpatterns.DynamicPV {
framework.Skipf("Suite %q does not support %v", p.tsInfo.name, pattern.VolType)
}
ok := false
dDriver, ok = driver.(DynamicPVTestDriver)
if !ok {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
})
// Ginkgo's "Global Shared Behaviors" require arguments for a shared function
// to be a single struct and to be passed as a pointer.
// Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details.
testProvisioning(&input)
})
}
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("provisioning")
type provisioningTestResource struct {
driver TestDriver
claimSize string
sc *storage.StorageClass
pvc *v1.PersistentVolumeClaim
// follow parameter is used to test provision volume from snapshot
vsc *unstructured.Unstructured
init := func() {
// Now do the more expensive test initialization.
config, testCleanup = driver.PrepareTest(f)
cs = config.Framework.ClientSet
claimSize := dDriver.GetClaimSize()
sc = dDriver.GetDynamicProvisionStorageClass(config, "")
if sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
var _ TestResource = &provisioningTestResource{}
func (p *provisioningTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
// Setup provisioningTest resource
switch pattern.VolType {
case testpatterns.DynamicPV:
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
p.sc = dDriver.GetDynamicProvisionStorageClass("")
if p.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
}
p.driver = driver
p.claimSize = dDriver.GetClaimSize()
p.pvc = getClaim(p.claimSize, driver.GetDriverInfo().Config.Framework.Namespace.Name)
p.pvc.Spec.StorageClassName = &p.sc.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", p.sc, p.pvc)
if sDriver, ok := driver.(SnapshottableTestDriver); ok {
p.vsc = sDriver.GetSnapshotClass()
}
}
default:
framework.Failf("Dynamic Provision test doesn't support: %s", pattern.VolType)
pvc = getClaim(claimSize, config.Framework.Namespace.Name)
pvc.Spec.StorageClassName = &sc.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", sc, pvc)
testCase = &StorageClassTest{
Client: config.Framework.ClientSet,
Claim: pvc,
Class: sc,
ClaimSize: claimSize,
ExpectedSize: claimSize,
}
}
func (p *provisioningTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
cleanup := func() {
if testCleanup != nil {
testCleanup()
testCleanup = nil
}
type provisioningTestInput struct {
testCase StorageClassTest
cs clientset.Interface
dc dynamic.Interface
pvc *v1.PersistentVolumeClaim
sc *storage.StorageClass
vsc *unstructured.Unstructured
dInfo *DriverInfo
nodeName string
}
func testProvisioning(input *provisioningTestInput) {
// common checker for most of the test cases below
pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVWriteReadSingleNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
}
It("should provision storage with defaults", func() {
input.testCase.PvCheck = pvcheck
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
init()
defer cleanup()
testCase.TestDynamicProvisioning()
})
It("should provision storage with mount options", func() {
if input.dInfo.SupportedMountOption == nil {
framework.Skipf("Driver %q does not define supported mount option - skipping", input.dInfo.Name)
if dInfo.SupportedMountOption == nil {
framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
}
input.sc.MountOptions = input.dInfo.SupportedMountOption.Union(input.dInfo.RequiredMountOption).List()
input.testCase.PvCheck = pvcheck
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
init()
defer cleanup()
testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
testCase.TestDynamicProvisioning()
})
It("should access volume from different nodes", func() {
init()
defer cleanup()
// The assumption is that if the test hasn't been
// locked onto a single node, then the driver is
// usable on all of them *and* supports accessing a volume
// from any node.
if input.nodeName != "" {
framework.Skipf("Driver %q only supports testing on one node - skipping", input.dInfo.Name)
if config.ClientNodeName != "" {
framework.Skipf("Driver %q only supports testing on one node - skipping", dInfo.Name)
}
// Ensure that we actually have more than one node.
nodes := framework.GetReadySchedulableNodesOrDie(input.cs)
nodes := framework.GetReadySchedulableNodesOrDie(cs)
if len(nodes.Items) <= 1 {
framework.Skipf("need more than one node - skipping")
}
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVMultiNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVMultiNodeCheck(cs, claim, volume, NodeSelection{Name: config.ClientNodeName})
}
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
testCase.TestDynamicProvisioning()
})
It("should create and delete block persistent volumes", func() {
if !input.dInfo.Capabilities[CapBlock] {
framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name)
if !dInfo.Capabilities[CapBlock] {
framework.Skipf("Driver %q does not support BlockVolume - skipping", dInfo.Name)
}
init()
defer cleanup()
block := v1.PersistentVolumeBlock
input.testCase.VolumeMode = &block
input.pvc.Spec.VolumeMode = &block
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
testCase.VolumeMode = &block
pvc.Spec.VolumeMode = &block
testCase.TestDynamicProvisioning()
})
It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
if !input.dInfo.Capabilities[CapDataSource] {
framework.Skipf("Driver %q does not support populate data from snapshot - skipping", input.dInfo.Name)
if !dInfo.Capabilities[CapDataSource] {
framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
}
dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: input.nodeName}, input.cs, input.dc, input.pvc, input.sc, input.vsc)
sDriver, ok := driver.(SnapshottableTestDriver)
if !ok {
framework.Failf("Driver %q has CapDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
}
init()
defer cleanup()
dc := config.Framework.DynamicClient
vsc := sDriver.GetSnapshotClass(config)
dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: config.ClientNodeName}, cs, dc, pvc, sc, vsc)
defer cleanupFunc()
input.pvc.Spec.DataSource = dataSource
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
pvc.Spec.DataSource = dataSource
testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
By("checking whether the created volume has the pre-populated data")
command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: input.nodeName})
RunInPodWithVolume(cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: config.ClientNodeName})
}
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
testCase.TestDynamicProvisioning()
})
It("should allow concurrent writes on the same node", func() {
if !input.dInfo.Capabilities[CapMultiPODs] {
framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", input.dInfo.Name)
if !dInfo.Capabilities[CapMultiPODs] {
framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
}
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
init()
defer cleanup()
testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
// We start two pods concurrently on the same node,
// using the same PVC. Both wait for other to create a
// file before returning. The pods are forced onto the
@@ -265,7 +241,7 @@ func testProvisioning(input *provisioningTestInput) {
defer GinkgoRecover()
defer wg.Done()
node := NodeSelection{
Name: input.nodeName,
Name: config.ClientNodeName,
}
if podName == secondPodName {
node.Affinity = &v1.Affinity{
@@ -283,18 +259,24 @@ func testProvisioning(input *provisioningTestInput) {
},
}
}
RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node)
RunInPodWithVolume(cs, claim.Namespace, claim.Name, podName, command, node)
}
go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
wg.Wait()
}
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
testCase.TestDynamicProvisioning()
})
}
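
The init/cleanup idiom above repeats in every suite touched by this change; reduced to its skeleton it looks like this (fooTestSuite is a placeholder):

func (s *fooTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
	var (
		config      *PerTestConfig
		testCleanup func()
	)

	// Cheap skips based on static information go here, before the
	// framework is created.

	f := framework.NewDefaultFramework("foo")

	init := func() {
		// Expensive per-test setup, run from inside each It.
		config, testCleanup = driver.PrepareTest(f)
	}
	cleanup := func() {
		if testCleanup != nil {
			testCleanup()
			testCleanup = nil
		}
	}

	It("does something with the volume", func() {
		init()
		defer cleanup()
		// The actual test runs against config.Framework.ClientSet.
		_ = config
	})
}
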
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass
func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) *v1.PersistentVolume {
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest
func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
client := t.Client
Expect(client).NotTo(BeNil(), "StorageClassTest.Client is required")
claim := t.Claim
Expect(claim).NotTo(BeNil(), "StorageClassTest.Claim is required")
class := t.Class
var err error
if class != nil {
Expect(*claim.Spec.StorageClassName).To(Equal(class.Name))
@@ -493,29 +475,29 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
pod = nil
}
func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class, nodeSelector, expectUnschedulable)
func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
pvs, node := t.TestBindingWaitForFirstConsumerMultiPVC([]*v1.PersistentVolumeClaim{t.Claim}, nodeSelector, expectUnschedulable)
if pvs == nil {
return nil, node
}
return pvs[0], node
}
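
The single-PVC wrapper keeps old call sites short. A usage sketch, assuming Client, Class, and Claim were populated as shown earlier:

pv, node := test.TestBindingWaitForFirstConsumer(nil /* node selector */, false /* expect unschedulable */)
framework.Logf("claim bound to PV %q on node %q", pv.Name, node.Name)
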
func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
var err error
Expect(len(claims)).ToNot(Equal(0))
namespace := claims[0].Namespace
By("creating a storage class " + class.Name)
class, err = client.StorageV1().StorageClasses().Create(class)
By("creating a storage class " + t.Class.Name)
class, err := t.Client.StorageV1().StorageClasses().Create(t.Class)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(client, class.Name)
defer deleteStorageClass(t.Client, class.Name)
By("creating claims")
var claimNames []string
var createdClaims []*v1.PersistentVolumeClaim
for _, claim := range claims {
c, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
claimNames = append(claimNames, c.Name)
createdClaims = append(createdClaims, c)
Expect(err).NotTo(HaveOccurred())
@@ -523,7 +505,7 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
defer func() {
var errors map[string]error
for _, claim := range createdClaims {
err := framework.DeletePersistentVolumeClaim(client, claim.Name, claim.Namespace)
err := framework.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace)
if err != nil {
errors[claim.Name] = err
}
@@ -537,44 +519,44 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out
By("checking the claims are in pending state")
err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
Expect(err).To(HaveOccurred())
verifyPVCsPending(client, createdClaims)
verifyPVCsPending(t.Client, createdClaims)
By("creating a pod referring to the claims")
// Create a pod referring to the claim and wait for it to get to running
var pod *v1.Pod
if expectUnschedulable {
pod, err = framework.CreateUnschedulablePod(client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
pod, err = framework.CreateUnschedulablePod(t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
} else {
pod, err = framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
pod, err = framework.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
}
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePodOrFail(client, pod.Namespace, pod.Name)
framework.WaitForPodToDisappear(client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
framework.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
framework.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
}()
if expectUnschedulable {
// Verify that no claims are provisioned.
verifyPVCsPending(client, createdClaims)
verifyPVCsPending(t.Client, createdClaims)
return nil, nil
}
// collect node details
node, err := client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("re-checking the claims to see they binded")
var pvs []*v1.PersistentVolume
for _, claim := range createdClaims {
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// make sure claim did bind
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
pv, err := t.Client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pvs = append(pvs, pv)
}


@@ -24,13 +24,10 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@@ -54,7 +51,6 @@ type SnapshotClassTest struct {
Parameters map[string]string
NodeName string
NodeSelector map[string]string // NodeSelector for the pod
SnapshotContentCheck func(snapshotContent *unstructured.Unstructured) error
}
type snapshottableTestSuite struct {
@@ -79,198 +75,116 @@ func (s *snapshottableTestSuite) getTestSuiteInfo() TestSuiteInfo {
return s.tsInfo
}
func (s *snapshottableTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
dInfo := driver.GetDriverInfo()
if !dInfo.Capabilities[CapDataSource] {
framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
}
}
func createSnapshottableTestInput(driver TestDriver, pattern testpatterns.TestPattern) (snapshottableTestResource, snapshottableTestInput) {
// Setup test resource for driver and testpattern
resource := snapshottableTestResource{}
resource.setupResource(driver, pattern)
dInfo := driver.GetDriverInfo()
input := snapshottableTestInput{
testCase: SnapshotClassTest{
NodeName: dInfo.Config.ClientNodeName,
},
cs: dInfo.Config.Framework.ClientSet,
dc: dInfo.Config.Framework.DynamicClient,
pvc: resource.pvc,
sc: resource.sc,
vsc: resource.vsc,
dInfo: dInfo,
}
return resource, input
}
func (s *snapshottableTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(s, pattern), func() {
func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
resource snapshottableTestResource
input snapshottableTestInput
needsCleanup bool
sDriver SnapshottableTestDriver
dDriver DynamicPVTestDriver
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(s, driver, pattern)
needsCleanup = true
// Create test input
resource, input = createSnapshottableTestInput(driver, pattern)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
// Check preconditions.
Expect(pattern.SnapshotType).To(Equal(testpatterns.DynamicCreatedSnapshot))
dInfo := driver.GetDriverInfo()
ok := false
sDriver, ok = driver.(SnapshottableTestDriver)
if !dInfo.Capabilities[CapDataSource] || !ok {
framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
}
dDriver, ok = driver.(DynamicPVTestDriver)
if !ok {
framework.Skipf("Driver %q does not support dynamic provisioning - skipping", driver.GetDriverInfo().Name)
}
})
// Ginkgo's "Global Shared Behaviors" require arguments for a shared function
// to be a single struct and to be passed as a pointer.
// Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details.
testSnapshot(&input)
})
}
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("snapshotting")
type snapshottableTestResource struct {
driver TestDriver
claimSize string
It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() {
cs := f.ClientSet
dc := f.DynamicClient
sc *storage.StorageClass
pvc *v1.PersistentVolumeClaim
// volume snapshot class
vsc *unstructured.Unstructured
}
// Now do the more expensive test initialization.
config, testCleanup := driver.PrepareTest(f)
defer testCleanup()
var _ TestResource = &snapshottableTestResource{}
func (s *snapshottableTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
// Setup snapshottableTest resource
switch pattern.SnapshotType {
case testpatterns.DynamicCreatedSnapshot:
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
s.sc = dDriver.GetDynamicProvisionStorageClass("")
if s.sc == nil {
vsc := sDriver.GetSnapshotClass(config)
class := dDriver.GetDynamicProvisionStorageClass(config, "")
if class == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
}
s.driver = driver
s.claimSize = dDriver.GetClaimSize()
s.pvc = getClaim(s.claimSize, driver.GetDriverInfo().Config.Framework.Namespace.Name)
s.pvc.Spec.StorageClassName = &s.sc.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", s.sc, s.pvc)
if sDriver, ok := driver.(SnapshottableTestDriver); ok {
s.vsc = sDriver.GetSnapshotClass()
}
}
claimSize := dDriver.GetClaimSize()
pvc := getClaim(claimSize, config.Framework.Namespace.Name)
pvc.Spec.StorageClassName = &class.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc)
default:
framework.Failf("Dynamic Snapshot test doesn't support: %s", pattern.SnapshotType)
}
}
func (s *snapshottableTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
}
type snapshottableTestInput struct {
testCase SnapshotClassTest
cs clientset.Interface
dc dynamic.Interface
pvc *v1.PersistentVolumeClaim
sc *storage.StorageClass
// volume snapshot class
vsc *unstructured.Unstructured
dInfo *DriverInfo
}
func testSnapshot(input *snapshottableTestInput) {
It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() {
TestCreateSnapshot(input.testCase, input.cs, input.dc, input.pvc, input.sc, input.vsc)
})
}
// TestCreateSnapshot tests dynamic creating snapshot with specified SnapshotClassTest and snapshotClass
func TestCreateSnapshot(
t SnapshotClassTest,
client clientset.Interface,
dynamicClient dynamic.Interface,
claim *v1.PersistentVolumeClaim,
class *storage.StorageClass,
snapshotClass *unstructured.Unstructured,
) *unstructured.Unstructured {
var err error
if class != nil {
By("creating a StorageClass " + class.Name)
class, err = client.StorageV1().StorageClasses().Create(class)
class, err := cs.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting storage class %s", class.Name)
framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil))
framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil))
}()
}
By("creating a claim")
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
// typically this claim has already been deleted
err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
if err != nil && !apierrs.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
}
}()
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("checking the claim")
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("creating a SnapshotClass")
snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{})
vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting SnapshotClass %s", snapshotClass.GetName())
framework.ExpectNoError(dynamicClient.Resource(snapshotClassGVR).Delete(snapshotClass.GetName(), nil))
framework.Logf("deleting SnapshotClass %s", vsc.GetName())
framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil))
}()
By("creating a snapshot")
snapshot := getSnapshot(claim.Name, claim.Namespace, snapshotClass.GetName())
snapshot := getSnapshot(pvc.Name, pvc.Namespace, vsc.GetName())
snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
// typically this snapshot has already been deleted
err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil)
err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil)
if err != nil && !apierrs.IsNotFound(err) {
framework.Failf("Error deleting snapshot %q. Error: %v", claim.Name, err)
framework.Failf("Error deleting snapshot %q. Error: %v", pvc.Name, err)
}
}()
err = WaitForSnapshotReady(dynamicClient, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
Expect(err).NotTo(HaveOccurred())
By("checking the snapshot")
// Get new copy of the snapshot
snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound snapshotContent
snapshotSpec := snapshot.Object["spec"].(map[string]interface{})
snapshotContentName := snapshotSpec["snapshotContentName"].(string)
snapshotContent, err := dynamicClient.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{})
snapshotContent, err := dc.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
snapshotContentSpec := snapshotContent.Object["spec"].(map[string]interface{})
@@ -279,18 +193,11 @@ func TestCreateSnapshot(
// Check SnapshotContent properties
By("checking the SnapshotContent")
Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(snapshotClass.GetName()))
Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(vsc.GetName()))
Expect(volumeSnapshotRef["name"]).To(Equal(snapshot.GetName()))
Expect(volumeSnapshotRef["namespace"]).To(Equal(snapshot.GetNamespace()))
Expect(persistentVolumeRef["name"]).To(Equal(pv.Name))
// Run the checker
if t.SnapshotContentCheck != nil {
err = t.SnapshotContentCheck(snapshotContent)
Expect(err).NotTo(HaveOccurred())
}
return snapshotContent
})
}
// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
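
Its body is not part of this diff; for orientation, the helper amounts to a poll loop against the dynamic client, sketched here under the assumption that the alpha snapshot object reports readiness via a boolean status field:

func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
	framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		snapshot, err := c.Resource(snapshotGVR).Namespace(ns).Get(snapshotName, metav1.GetOptions{})
		if err != nil {
			framework.Logf("Failed to get snapshot %q, retrying in %v. Error: %v", snapshotName, poll, err)
			continue
		}
		// Assumed status layout; the real field name may differ.
		if status, ok := snapshot.Object["status"].(map[string]interface{}); ok {
			if ready, ok := status["readyToUse"].(bool); ok && ready {
				return nil
			}
		}
	}
	return fmt.Errorf("VolumeSnapshot %s is not ready within %v", snapshotName, timeout)
}
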


@@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -71,98 +72,56 @@ func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
return s.tsInfo
}
func (s *subPathTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
}
func createSubPathTestInput(pattern testpatterns.TestPattern, resource subPathTestResource) subPathTestInput {
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
subPath := f.Namespace.Name
subPathDir := filepath.Join(volumePath, subPath)
return subPathTestInput{
f: f,
subPathDir: subPathDir,
filePathInSubpath: filepath.Join(volumePath, fileName),
filePathInVolume: filepath.Join(subPathDir, fileName),
volType: resource.volType,
pod: resource.pod,
formatPod: resource.formatPod,
volSource: resource.genericVolumeTestResource.volSource,
roVol: resource.roVolSource,
}
}
func (s *subPathTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(s, pattern), func() {
func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
resource subPathTestResource
input subPathTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(s, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = subPathTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createSubPathTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testSubPath(&input)
})
}
type subPathTestResource struct {
genericVolumeTestResource
config *PerTestConfig
testCleanup func()
cs clientset.Interface
resource *genericVolumeTestResource
roVolSource *v1.VolumeSource
pod *v1.Pod
formatPod *v1.Pod
}
subPathDir string
filePathInSubpath string
filePathInVolume string
)
var _ TestResource = &subPathTestResource{}
// No preconditions to test. Normally they would be in a BeforeEach here.
func (s *subPathTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
s.driver = driver
dInfo := s.driver.GetDriverInfo()
f := dInfo.Config.Framework
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("provisioning")
init := func() {
cs = f.ClientSet // needed for cleanup, f.ClientSet itself gets reset too early
// Now do the more expensive test initialization.
config, testCleanup = driver.PrepareTest(f)
fsType := pattern.FsType
volType := pattern.VolType
// Setup generic test resource
s.genericVolumeTestResource.setupResource(driver, pattern)
resource = createGenericVolumeTestResource(driver, config, pattern)
// Setup subPath test dependent resource
roVolSource = nil
switch volType {
case testpatterns.InlineVolume:
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
s.roVolSource = iDriver.GetVolumeSource(true, fsType, s.genericVolumeTestResource.volume)
roVolSource = iDriver.GetVolumeSource(true, fsType, resource.volume)
}
case testpatterns.PreprovisionedPV:
s.roVolSource = &v1.VolumeSource{
roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: s.genericVolumeTestResource.pvc.Name,
ClaimName: resource.pvc.Name,
ReadOnly: true,
},
}
case testpatterns.DynamicPV:
s.roVolSource = &v1.VolumeSource{
roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: s.genericVolumeTestResource.pvc.Name,
ClaimName: resource.pvc.Name,
ReadOnly: true,
},
}
@@ -171,245 +130,294 @@ func (s *subPathTestResource) setupResource(driver TestDriver, pattern testpatte
}
subPath := f.Namespace.Name
config := dInfo.Config
s.pod = SubpathTestPod(f, subPath, s.volType, s.volSource, true)
s.pod.Spec.NodeName = config.ClientNodeName
s.pod.Spec.NodeSelector = config.ClientNodeSelector
pod = SubpathTestPod(f, subPath, resource.volType, resource.volSource, true)
pod.Spec.NodeName = config.ClientNodeName
pod.Spec.NodeSelector = config.ClientNodeSelector
s.formatPod = volumeFormatPod(f, s.volSource)
s.formatPod.Spec.NodeName = config.ClientNodeName
s.formatPod.Spec.NodeSelector = config.ClientNodeSelector
formatPod = volumeFormatPod(f, resource.volSource)
formatPod.Spec.NodeName = config.ClientNodeName
formatPod.Spec.NodeSelector = config.ClientNodeSelector
subPathDir = filepath.Join(volumePath, subPath)
filePathInSubpath = filepath.Join(volumePath, fileName)
filePathInVolume = filepath.Join(subPathDir, fileName)
}
func (s *subPathTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
// Cleanup subPath test dependent resource
cleanup := func() {
if pod != nil {
By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, s.pod)
err := framework.DeletePodWithWait(f, cs, pod)
Expect(err).ToNot(HaveOccurred(), "while deleting pod")
// Cleanup generic test resource
s.genericVolumeTestResource.cleanupResource(driver, pattern)
pod = nil
}
type subPathTestInput struct {
f *framework.Framework
subPathDir string
filePathInSubpath string
filePathInVolume string
volType string
pod *v1.Pod
formatPod *v1.Pod
volSource *v1.VolumeSource
roVol *v1.VolumeSource
if resource != nil {
resource.cleanupResource()
resource = nil
}
if testCleanup != nil {
testCleanup()
testCleanup = nil
}
}
func testSubPath(input *subPathTestInput) {
It("should support non-existent path", func() {
init()
defer cleanup()
// Write the file in the subPath from init container 1
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1])
setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
// Read it from outside the subPath from container 1
testReadFile(input.f, input.filePathInVolume, input.pod, 1)
testReadFile(f, filePathInVolume, pod, 1)
})
It("should support existing directory", func() {
init()
defer cleanup()
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
// Write the file in the subPath from init container 1
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1])
setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
// Read it from outside the subPath from container 1
testReadFile(input.f, input.filePathInVolume, input.pod, 1)
testReadFile(f, filePathInVolume, pod, 1)
})
It("should support existing single file", func() {
init()
defer cleanup()
// Create the file in the init container
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", input.subPathDir, input.filePathInVolume))
setInitCommand(pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", subPathDir, filePathInVolume))
// Read it from inside the subPath from container 0
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
testReadFile(f, filePathInSubpath, pod, 0)
})
It("should support file as subpath", func() {
// Create the file in the init container
setInitCommand(input.pod, fmt.Sprintf("echo %s > %s", input.f.Namespace.Name, input.subPathDir))
init()
defer cleanup()
TestBasicSubpath(input.f, input.f.Namespace.Name, input.pod)
// Create the file in the init container
setInitCommand(pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, subPathDir))
TestBasicSubpath(f, f.Namespace.Name, pod)
})
It("should fail if subpath directory is outside the volume [Slow]", func() {
init()
defer cleanup()
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin %s", input.subPathDir))
setInitCommand(pod, fmt.Sprintf("ln -s /bin %s", subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod, false)
testPodFailSubpath(f, pod, false)
})
It("should fail if subpath file is outside the volume [Slow]", func() {
init()
defer cleanup()
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/sh %s", input.subPathDir))
setInitCommand(pod, fmt.Sprintf("ln -s /bin/sh %s", subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod, false)
testPodFailSubpath(f, pod, false)
})
It("should fail if non-existent subpath is outside the volume [Slow]", func() {
init()
defer cleanup()
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", input.subPathDir))
setInitCommand(pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod, false)
testPodFailSubpath(f, pod, false)
})
It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
init()
defer cleanup()
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s ../ %s", input.subPathDir))
setInitCommand(pod, fmt.Sprintf("ln -s ../ %s", subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod, false)
testPodFailSubpath(f, pod, false)
})
It("should support creating multiple subpath from same volumes [Slow]", func() {
init()
defer cleanup()
subpathDir1 := filepath.Join(volumePath, "subpath1")
subpathDir2 := filepath.Join(volumePath, "subpath2")
filepath1 := filepath.Join("/test-subpath1", fileName)
filepath2 := filepath.Join("/test-subpath2", fileName)
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
setInitCommand(pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath1",
SubPath: "subpath1",
})
addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath2",
SubPath: "subpath2",
})
// Write the files from container 0 and instantly read them back
addMultipleWrites(&input.pod.Spec.Containers[0], filepath1, filepath2)
testMultipleReads(input.f, input.pod, 0, filepath1, filepath2)
addMultipleWrites(&pod.Spec.Containers[0], filepath1, filepath2)
testMultipleReads(f, pod, 0, filepath1, filepath2)
})
It("should support restarting containers using directory as subpath [Slow]", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %v; touch %v", input.subPathDir, probeFilePath))
init()
defer cleanup()
testPodContainerRestart(input.f, input.pod)
// Create the directory
setInitCommand(pod, fmt.Sprintf("mkdir -p %v; touch %v", subPathDir, probeFilePath))
testPodContainerRestart(f, pod)
})
It("should support restarting containers using file as subpath [Slow]", func() {
// Create the file
setInitCommand(input.pod, fmt.Sprintf("touch %v; touch %v", input.subPathDir, probeFilePath))
init()
defer cleanup()
testPodContainerRestart(input.f, input.pod)
// Create the file
setInitCommand(pod, fmt.Sprintf("touch %v; touch %v", subPathDir, probeFilePath))
testPodContainerRestart(f, pod)
})
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
testSubpathReconstruction(input.f, input.pod, false)
init()
defer cleanup()
testSubpathReconstruction(f, pod, false)
})
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
if strings.HasPrefix(input.volType, "hostPath") || strings.HasPrefix(input.volType, "csi-hostpath") {
init()
defer cleanup()
if strings.HasPrefix(resource.volType, "hostPath") || strings.HasPrefix(resource.volType, "csi-hostpath") {
// TODO: This skip should be removed once #61446 is fixed
framework.Skipf("%s volume type does not support reconstruction, skipping", input.volType)
framework.Skipf("%s volume type does not support reconstruction, skipping", resource.volType)
}
testSubpathReconstruction(f, pod, true)
})
It("should support readOnly directory specified in the volumeMount", func() {
init()
defer cleanup()
// Create the directory
setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
// Write the file in the volume from init container 2
setWriteCommand(filePathInVolume, &pod.Spec.InitContainers[2])
// Read it from inside the subPath from container 0
pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(f, filePathInSubpath, pod, 0)
})
It("should support readOnly file specified in the volumeMount", func() {
init()
defer cleanup()
// Create the file
setInitCommand(pod, fmt.Sprintf("touch %s", subPathDir))
// Write the file in the volume from init container 2
setWriteCommand(subPathDir, &pod.Spec.InitContainers[2])
// Read it from inside the subPath from container 0
pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(f, volumePath, pod, 0)
})
It("should support existing directories when readOnly specified in the volumeSource", func() {
init()
defer cleanup()
if roVolSource == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType)
}
origpod := pod.DeepCopy()
// Create the directory
setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
// Write the file in the subPath from init container 1
setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
// Read it from inside the subPath from container 0
testReadFile(f, filePathInSubpath, pod, 0)
// Reset the pod
pod = origpod
// Set volume source to read only
pod.Spec.Volumes[0].VolumeSource = *roVolSource
// Read it from inside the subPath from container 0
testReadFile(f, filePathInSubpath, pod, 0)
})
It("should verify container cannot write to subpath readonly volumes", func() {
init()
defer cleanup()
if roVolSource == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType)
}
// Format the volume while it's writable
formatVolume(f, formatPod)
// Set volume source to read only
pod.Spec.Volumes[0].VolumeSource = *roVolSource
// Write the file in the volume from container 0
setWriteCommand(subPathDir, &pod.Spec.Containers[0])
// Pod should fail
testPodFailSubpath(f, pod, true)
})
It("should be able to unmount after the subpath directory is deleted", func() {
init()
defer cleanup()
By(fmt.Sprintf("Creating pod %s", input.pod.Name))
removeUnusedContainers(input.pod)
pod, err := input.f.ClientSet.CoreV1().Pods(input.f.Namespace.Name).Create(input.pod)
// Change volume container to busybox so we can exec later
pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
defer func() {
By(fmt.Sprintf("Deleting pod %s", pod.Name))
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
// Wait for pod to be running
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
// Exec into container that mounted the volume, delete subpath directory
rmCmd := fmt.Sprintf("rm -rf %s", input.subPathDir)
rmCmd := fmt.Sprintf("rm -rf %s", subPathDir)
_, err = podContainerExec(pod, 1, rmCmd)
Expect(err).ToNot(HaveOccurred(), "while removing subpath directory")


@@ -25,17 +25,29 @@ import (
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
// TestDriver represents an interface for a driver to be tested in TestSuite.
// Except for GetDriverInfo, all methods will be called at test runtime and thus
// can use framework.Skipf, framework.Fatal, Gomega assertions, etc.
type TestDriver interface {
// GetDriverInfo returns DriverInfo for the TestDriver. This must be static
// information.
GetDriverInfo() *DriverInfo
// SkipUnsupportedTest skips the test if the Testpattern is not
// suitable for the TestDriver. It gets called after
// parsing parameters of the test suite and before the
// framework is initialized. Cheap tests that just check
// parameters like the cloud provider can and should be
// done in SkipUnsupportedTest to avoid setting up more
// expensive resources like framework.Framework. Tests that
// depend on a connection to the cluster can be done in
// PrepareTest once the framework is ready.
SkipUnsupportedTest(testpatterns.TestPattern)
// PrepareTest is called at test execution time each time a new test case is about to start.
// It sets up all necessary resources and returns the per-test configuration
// plus a cleanup function that frees all allocated resources.
PrepareTest(f *framework.Framework) (*PerTestConfig, func())
}
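// Editor's sketch (illustrative, not part of this commit): a minimal driver
// satisfying the contract above. The type "exampleDriver" and its behavior
// are hypothetical; only the method set mirrors the interface.
type exampleDriver struct {
	driverInfo DriverInfo
}

func (d *exampleDriver) GetDriverInfo() *DriverInfo {
	// Static information only; no cluster access happens here.
	return &d.driverInfo
}

func (d *exampleDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
	// Cheap, static checks run here, before framework or driver setup.
	if pattern.VolType == testpatterns.DynamicPV {
		framework.Skipf("Driver %q does not support dynamic provisioning -- skipping", d.driverInfo.Name)
	}
}

func (d *exampleDriver) PrepareTest(f *framework.Framework) (*PerTestConfig, func()) {
	// Expensive per-test setup happens here, once the framework exists.
	config := &PerTestConfig{
		Driver:    d,
		Prefix:    "example",
		Framework: f,
	}
	return config, func() {
		// Tear down whatever PrepareTest allocated.
	}
}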
// TestVolume is the result of PreprovisionedVolumeTestDriver.CreateVolume.
@@ -49,7 +61,7 @@ type TestVolume interface {
type PreprovisionedVolumeTestDriver interface {
TestDriver
// CreateVolume creates a pre-provisioned volume of the desired volume type.
CreateVolume(config *PerTestConfig, volumeType testpatterns.TestVolType) TestVolume
}
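// Editor's sketch (illustrative, not part of this commit): CreateVolume now
// receives the per-test config explicitly instead of reaching it through the
// static DriverInfo. "exampleVolume" is hypothetical.
func (d *exampleDriver) CreateVolume(config *PerTestConfig, volType testpatterns.TestVolType) TestVolume {
	// Namespace, clients etc. now come from the per-test config.
	framework.Logf("creating %s volume in namespace %s", volType, config.Framework.Namespace.Name)
	return &exampleVolume{}
}

type exampleVolume struct{}

// DeleteVolume frees the resources created above.
func (v *exampleVolume) DeleteVolume() {}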
// InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume
@@ -68,7 +80,6 @@ type PreprovisionedPVTestDriver interface {
// GetPersistentVolumeSource returns a PersistentVolumeSource with volume node affinity for pre-provisioned Persistent Volume.
// It will set readOnly and fsType to the PersistentVolumeSource, if TestDriver supports both of them.
// It will return nil, if the TestDriver doesn't support either of the parameters.
GetPersistentVolumeSource(readOnly bool, fsType string, testVolume TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity)
}
@@ -78,7 +89,7 @@ type DynamicPVTestDriver interface {
// GetDynamicProvisionStorageClass returns a StorageClass to dynamically provision a Persistent Volume.
// It will set fsType to the StorageClass, if TestDriver supports it.
// It will return nil, if the TestDriver doesn't support it.
GetDynamicProvisionStorageClass(config *PerTestConfig, fsType string) *storagev1.StorageClass
// GetClaimSize returns the size of the volume that is to be provisioned ("5Gi", "1Mi").
// The size must be chosen so that the resulting volume is large enough for all
@@ -91,7 +102,7 @@ type SnapshottableTestDriver interface {
TestDriver
// GetSnapshotClass returns a SnapshotClass to create snapshot.
// It will return nil, if the TestDriver doesn't support it.
GetSnapshotClass(config *PerTestConfig) *unstructured.Unstructured
}
// Capability represents a feature that a volume plugin supports
@@ -112,7 +123,7 @@ const (
CapMultiPODs Capability = "multipods"
)
// DriverInfo represents static information about a TestDriver.
type DriverInfo struct {
Name string // Name of the driver
FeatureTag string // FeatureTag for the driver
@@ -122,14 +133,15 @@ type DriverInfo struct {
SupportedMountOption sets.String // Set of supported mount options
RequiredMountOption sets.String // Set of required mount options (Optional)
Capabilities map[Capability]bool // Map that represents plugin capabilities
}
// PerTestConfig represents parameters that control test execution.
// One instance gets allocated for each test and is then passed
// via pointer to functions involved in the test.
type PerTestConfig struct {
// The test driver for the test.
Driver TestDriver
// Some short word that gets inserted into dynamically
// generated entities (pods, paths) as first part of the name
// to make debugging easier. Can be the same for different
@@ -154,8 +166,9 @@ type TestConfig struct {
// the configuration that then has to be used to run tests.
// The values above are ignored for such tests.
ServerConfig *framework.VolumeTestConfig
// TopologyEnabled indicates that the Topology feature gate
// should be enabled in external-provisioner
TopologyEnabled bool
}
// GetUniqueDriverName returns a unique driver name that can be used in parallel tests
func (config *PerTestConfig) GetUniqueDriverName() string {
return config.Driver.GetDriverInfo().Name + "-" + config.Framework.UniqueName
}
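// Editor's sketch (illustrative, not part of this commit): how a dynamic
// driver might use the per-test config when building its StorageClass. The
// "getStorageClass" helper is an assumption; the point is that
// GetUniqueDriverName keeps the provisioner name unique when the same driver
// runs in parallel tests.
func (d *exampleDriver) GetDynamicProvisionStorageClass(config *PerTestConfig, fsType string) *storagev1.StorageClass {
	provisioner := config.GetUniqueDriverName()
	parameters := map[string]string{}
	if fsType != "" {
		parameters["fsType"] = fsType
	}
	ns := config.Framework.Namespace.Name
	return getStorageClass(provisioner, parameters, nil, ns, provisioner+"-sc")
}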


@@ -74,87 +74,59 @@ func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
dInfo = driver.GetDriverInfo()
config *PerTestConfig
testCleanup func()
resource *genericVolumeTestResource
)
// No preconditions to test. Normally they would be in a BeforeEach here.
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumeio")
init := func() {
// Now do the more expensive test initialization.
config, testCleanup = driver.PrepareTest(f)
resource = createGenericVolumeTestResource(driver, config, pattern)
if resource.volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
}
cleanup := func() {
if resource != nil {
resource.cleanupResource()
resource = nil
}
if testCleanup != nil {
testCleanup()
testCleanup = nil
}
}
It("should write files of various sizes, verify size, validate content [Slow]", func() {
init()
defer cleanup()
cs := f.ClientSet
fileSizes := createFileSizes(dInfo.MaxFileSize)
testFile := fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name)
var fsGroup *int64
if dInfo.Capabilities[CapFsGroup] {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}
podSec := v1.PodSecurityContext{
FSGroup: fsGroup,
}
err := testVolumeIO(f, cs, convertTestConfig(config), *resource.volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
}
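// Editor's sketch (illustrative, not part of this commit): roughly how a
// suite's defineTests is expected to be wired up per pattern; the function
// name "defineTestSuite" and the TestSuiteInfo field names are assumptions.
// The cheap SkipUnsupportedTest check runs in a BeforeEach, while all
// expensive work waits for init() inside each It.
func defineTestSuite(driver TestDriver, suite TestSuite) {
	for _, pattern := range suite.getTestSuiteInfo().testPatterns {
		p := pattern // capture the range variable for the closures below
		Context(getTestNameStr(suite, p), func() {
			BeforeEach(func() {
				// Static skips happen before any resources are created.
				driver.SkipUnsupportedTest(p)
			})
			suite.defineTests(driver, p)
		})
	}
}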


@@ -61,105 +61,32 @@ func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
dInfo = driver.GetDriverInfo()
config *PerTestConfig
testCleanup func()
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
volume TestVolume
)
// No preconditions to test. Normally they would be in a BeforeEach here.
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumemode")
init := func() {
// Now do the more expensive test initialization.
config, testCleanup = driver.PrepareTest(f)
ns := f.Namespace
fsType := pattern.FsType
volBindMode := storagev1.VolumeBindingImmediate
var (
scName string
@@ -168,211 +95,219 @@ func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpa
)
// Create volume for pre-provisioned volume tests
volume = CreateVolume(driver, config, pattern.VolType)
switch pattern.VolType {
case testpatterns.PreprovisionedPV:
if pattern.VolMode == v1.PersistentVolumeBlock {
scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
}
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, volume)
if pvSource == nil {
framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
}
storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
sc = storageClass
pv = framework.MakePersistentVolume(pvConfig)
pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
}
case testpatterns.DynamicPV:
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass(config, fsType)
if sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
sc.VolumeBindingMode = &volBindMode
claimSize := dDriver.GetClaimSize()
pvc = getClaim(claimSize, ns.Name)
pvc.Spec.StorageClassName = &sc.Name
pvc.Spec.VolumeMode = &pattern.VolMode
}
default:
framework.Failf("Volume mode test doesn't support: %s", volType)
framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
}
}
cleanup := func() {
if pv != nil || pvc != nil {
By("Deleting pv and pvc")
errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, pv, pvc)
if len(errs) > 0 {
framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
framework.Logf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
}
pv = nil
pvc = nil
}
if sc != nil {
By("Deleting sc")
deleteStorageClass(f.ClientSet, sc.Name)
sc = nil
}
// Cleanup volume for pre-provisioned volume tests
if volume != nil {
volume.DeleteVolume()
volume = nil
}
if testCleanup != nil {
testCleanup()
testCleanup = nil
}
}
// We register different tests depending on the driver
isBlockSupported := dInfo.Capabilities[CapBlock]
switch pattern.VolType {
case testpatterns.PreprovisionedPV:
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
It("should fail to create pod by failing to mount volume", func() {
init()
defer cleanup()
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
sc, err = cs.StorageV1().StorageClasses().Create(sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
pvc.Spec.VolumeName = pv.Name
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
false, "", false, false, framework.SELinuxLabel,
nil, config.ClientNodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).To(HaveOccurred())
})
} else {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
init()
defer cleanup()
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
sc, err = cs.StorageV1().StorageClasses().Create(sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
pvc.Spec.VolumeName = pv.Name
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
false, "", false, false, framework.SELinuxLabel,
nil, config.ClientNodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
By("Checking if persistent volume exists as expected volume mode")
utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests
}
case testpatterns.DynamicPV:
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
It("should fail in binding dynamic provisioned PV to PVC", func() {
init()
defer cleanup()
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
sc, err = cs.StorageV1().StorageClasses().Create(sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred())
})
} else {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
init()
defer cleanup()
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
sc, err = cs.StorageV1().StorageClasses().Create(sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pv, err = cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
false, "", false, false, framework.SELinuxLabel,
nil, config.ClientNodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
By("Checking if persistent volume exists as expected volume mode")
utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests
}
default:
framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType)
}
}
func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource, volumeNodeAffinity *v1.VolumeNodeAffinity) (*storagev1.StorageClass,


@@ -89,101 +89,76 @@ func skipExecTest(driver TestDriver) {
}
}
func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
dInfo = driver.GetDriverInfo()
config *PerTestConfig
testCleanup func()
resource *genericVolumeTestResource
)
// No preconditions to test. Normally they would be in a BeforeEach here.
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumeio")
init := func() {
// Now do the more expensive test initialization.
config, testCleanup = driver.PrepareTest(f)
resource = createGenericVolumeTestResource(driver, config, pattern)
if resource.volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
}
cleanup := func() {
if resource != nil {
resource.cleanupResource()
resource = nil
}
if testCleanup != nil {
testCleanup()
testCleanup = nil
}
}
It("should be mountable", func() {
skipPersistenceTest(driver)
init()
defer func() {
framework.VolumeTestCleanup(f, convertTestConfig(config))
cleanup()
}()
tests := []framework.VolumeTest{
{
Volume: *resource.volSource,
File: "index.html",
// Must match content
ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
dInfo.Name, f.Namespace.Name),
},
}
config := convertTestConfig(config)
framework.InjectHtml(f.ClientSet, config, tests[0].Volume, tests[0].ExpectedContent)
var fsGroup *int64
if dInfo.Capabilities[CapFsGroup] {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}
framework.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests)
})
It("should allow exec of files on the volume", func() {
skipExecTest(driver)
init()
defer cleanup()
testScriptInPod(f, resource.volType, resource.volSource, config.ClientNodeSelector)
})
}
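// Editor's sketch (illustrative, not part of this commit): the approximate
// shape of the convertTestConfig helper used above, mapping the new
// PerTestConfig onto the older framework.VolumeTestConfig consumed by
// framework.InjectHtml and framework.TestVolumeClient. Exact field names are
// assumptions.
func convertTestConfig(in *PerTestConfig) framework.VolumeTestConfig {
	if in.ServerConfig != nil {
		// Drivers that run their own server pod provide a ready-made config.
		return *in.ServerConfig
	}
	return framework.VolumeTestConfig{
		Namespace:      in.Framework.Namespace.Name,
		Prefix:         in.Prefix,
		ClientNodeName: in.ClientNodeName,
		NodeSelector:   in.ClientNodeSelector,
	}
}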


@@ -212,21 +212,22 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
action := "creating claims with class with waitForFirstConsumer"
suffix := "delayed"
var topoZone string
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
if specifyAllowedTopology {
action += " and allowedTopologies"
suffix += "-topo"
topoZone = getRandomClusterZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone)
}
By(action)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &test.Class.Name
claims = append(claims, claim)
}
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
if node == nil {
framework.Failf("unexpected nil node found")
}
@@ -440,10 +441,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
By("Testing " + test.Name)
suffix := fmt.Sprintf("%d", i)
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
test.Claim = newClaim(test, ns, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
test.TestDynamicProvisioning()
}
// Run the last test with storage.k8s.io/v1beta1 on pvc
@@ -455,9 +457,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(c, class.Name)
betaTest.Client = c
betaTest.Class = nil
betaTest.Claim = newClaim(*betaTest, ns, "beta")
betaTest.Claim.Spec.StorageClassName = &(class.Name)
(*betaTest).TestDynamicProvisioning()
}
})
@@ -465,6 +469,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.SkipUnlessProviderIs("gce", "gke")
test := testsuites.StorageClassTest{
Client: c,
Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
@@ -479,12 +484,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
}
class := newStorageClass(test, ns, "reclaimpolicy")
test.Class = newStorageClass(test, ns, "reclaimpolicy")
retain := v1.PersistentVolumeReclaimRetain
test.Class.ReclaimPolicy = &retain
test.Claim = newClaim(test, ns, "reclaimpolicy")
test.Claim.Spec.StorageClassName = &test.Class.Name
pv := test.TestDynamicProvisioning()
By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
@@ -718,17 +723,18 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
By("creating a StorageClass")
test := testsuites.StorageClassTest{
Client: c,
Name: "external provisioner test",
Provisioner: externalPluginName,
ClaimSize: "1500Mi",
ExpectedSize: "1500Mi",
}
class := newStorageClass(test, ns, "external")
claim := newClaim(test, ns, "external")
claim.Spec.StorageClassName = &(class.Name)
test.Class = newStorageClass(test, ns, "external")
test.Claim = newClaim(test, ns, "external")
test.Claim.Spec.StorageClassName = &test.Class.Name
By("creating a claim with a external provisioning annotation")
test.TestDynamicProvisioning()
})
})
@@ -738,13 +744,14 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
By("creating a claim with no annotation")
test := testsuites.StorageClassTest{
Client: c,
Name: "default",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
}
claim := newClaim(test, ns, "default")
testsuites.TestDynamicProvisioning(test, c, claim, nil)
test.Claim = newClaim(test, ns, "default")
test.TestDynamicProvisioning()
})
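// Editor's sketch (illustrative, not part of this commit): the new calling
// convention used throughout this file. Client, Class and Claim are now
// fields of StorageClassTest and TestDynamicProvisioning is a method on it;
// the concrete values below are placeholders.
//
//	test := testsuites.StorageClassTest{
//		Client:       c,
//		Name:         "example provisioner test",
//		Provisioner:  "kubernetes.io/gce-pd",
//		ClaimSize:    "2Gi",
//		ExpectedSize: "2Gi",
//	}
//	test.Class = newStorageClass(test, ns, "example")
//	test.Claim = newClaim(test, ns, "example")
//	test.Claim.Spec.StorageClassName = &test.Class.Name
//	pv := test.TestDynamicProvisioning() // creates class and claim, waits for binding, returns the PV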
// Modifying the default storage class can be disruptive to other tests that depend on it
@@ -817,6 +824,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
serverUrl := "http://" + pod.Status.PodIP + ":8081"
By("creating a StorageClass")
test := testsuites.StorageClassTest{
Client: c,
Name: "Gluster Dynamic provisioner test",
Provisioner: "kubernetes.io/glusterfs",
ClaimSize: "2Gi",
@@ -824,13 +832,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Parameters: map[string]string{"resturl": serverUrl},
}
suffix := fmt.Sprintf("glusterdptest")
test.Class = newStorageClass(test, ns, suffix)
By("creating a claim object with a suffix for gluster dynamic provisioner")
test.Claim = newClaim(test, ns, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
test.TestDynamicProvisioning()
})
})
@@ -929,12 +937,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}
By("creating a claim with class with allowedTopologies set")
suffix := "topology"
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
zone := getRandomClusterZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, test.Class, zone)
test.Claim = newClaim(test, ns, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
pv := test.TestDynamicProvisioning()
checkZoneFromLabelAndAffinity(pv, zone, true)
}
})